Date: 15 May 2022
From: Changbin Du <changbin.du@huawei.com>
Subject: Re: riscv: llvm-compiler: calling convention violation: temporary register $t2 is used to pass the ninth function parameter
On Fri, May 13, 2022 at 12:39:55PM -0700, Nick Desaulniers wrote:
> On Thu, May 12, 2022 at 8:46 PM Changbin Du <changbin.du@huawei.com> wrote:
> >
> > On Thu, May 12, 2022 at 07:49:41AM -0700, Craig Topper wrote:
> > > Changbin, can you provide a pre-processed source and a command line? I can
> > > reduce it on the llvm side.
> > >
> >
> > Hmm, clang cannot build the preprocessed source by itself!
>
> Sorry, I should have provided more info.
>
> In order to get the preprocessed source, you generally can do:
>
> $ ARCH=riscv make LLVM=1 -j$(nproc) defconfig lib/string.i
>
> replace the final command line parameter with the path to the source
> file you care about.
>
> Then, to get the command line invocation, you can do:
>
> $ ARCH=riscv make LLVM=1 -j$(nproc) lib/string.o V=1
>
> Then you can provide the output of those two commands.
> --
> Thanks,
> ~Nick Desaulniers

No problem, I know these tricks. Please check the attached file, and build it with:
$ /opt/llvm-13.0.0/bin/clang -nostdinc -Qunused-arguments -Wall -Wundef -Wno-trigraphs -fno-strict-aliasing -fno-common -fshort-wchar -fno-PIE -Werror=implicit-function-declaration -Werror=implicit-int -Werror=return-type -Wno-format-security -std=gnu11 --target=riscv64-linux-gnu -fintegrated-as -Werror=unknown-warning-option -Werror=ignored-optimization-argument -mabi=lp64 -mno-relax -march=rv64imac -mno-save-restore -mcmodel=medany -fno-omit-frame-pointer -fno-delete-null-pointer-checks -Wno-frame-address -Wno-address-of-packed-member -O2 -Wframe-larger-than=2048 -fstack-protector-strong -Wno-gnu -Wno-unused-but-set-variable -Wno-unused-const-variable -fno-omit-frame-pointer -fno-optimize-sibling-calls -fno-stack-clash-protection -fpatchable-function-entry=8 -Wdeclaration-after-statement -Wvla -Wno-pointer-sign -Wcast-function-type -fno-strict-overflow -fno-stack-check -Werror=date-time -Werror=incompatible-pointer-types -Wno-initializer-overrides -Wno-format -Wno-sign-compare -Wno-format-zero-length -Wno-pointer-to-enum-cast -Wno-tautological-constant-out-of-range-compare -g -c -o route.o route.i
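
For anyone who wants something smaller than the attached route.i, here is a
minimal, hypothetical sketch (not reduced from route.c) of the pattern in the
subject line: the RISC-V LP64 psABI passes the first eight integer arguments
in a0-a7 and the ninth on the stack, so a call with nine arguments makes any
deviation easy to spot:

/*
 * Hypothetical standalone example, not from the kernel sources: a call
 * with nine integer arguments. Per the psABI, the ninth (i) is expected
 * on the stack rather than in a temporary register such as t2.
 */
__attribute__((noinline)) static long callee(long a, long b, long c, long d,
                                             long e, long f, long g, long h,
                                             long i)
{
        return a + i;
}

long caller(void)
{
        return callee(1, 2, 3, 4, 5, 6, 7, 8, 9);
}

Building that with the same clang invocation and disassembling with
llvm-objdump -d should show which register or stack slot carries the ninth
argument at the call site; whether t2 appears may depend on the optimization
level and the callee's linkage.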

--
Cheers,
Changbin Du
# 1 "net/ipv6/route.c"
# 1 "<built-in>" 1
# 1 "<built-in>" 3
# 352 "<built-in>" 3
# 1 "<command line>" 1
# 1 "<built-in>" 2
# 1 "././include/linux/compiler-version.h" 1
# 2 "<built-in>" 2
# 1 "././include/linux/kconfig.h" 1




# 1 "./include/generated/autoconf.h" 1
# 6 "././include/linux/kconfig.h" 2
# 3 "<built-in>" 2
# 1 "././include/linux/compiler_types.h" 1
# 73 "././include/linux/compiler_types.h"
# 1 "./include/linux/compiler_attributes.h" 1
# 74 "././include/linux/compiler_types.h" 2
# 88 "././include/linux/compiler_types.h"
# 1 "./include/linux/compiler-clang.h" 1
# 89 "././include/linux/compiler_types.h" 2
# 110 "././include/linux/compiler_types.h"
struct ftrace_branch_data {
const char *func;
const char *file;
unsigned line;
union {
struct {
unsigned long correct;
unsigned long incorrect;
};
struct {
unsigned long miss;
unsigned long hit;
};
unsigned long miss_hit[2];
};
};

struct ftrace_likely_data {
struct ftrace_branch_data data;
unsigned long constant;
};
# 4 "<built-in>" 2
# 1 "net/ipv6/route.c" 2
# 25 "net/ipv6/route.c"
# 1 "./include/linux/capability.h" 1
# 16 "./include/linux/capability.h"
# 1 "./include/uapi/linux/capability.h" 1
# 17 "./include/uapi/linux/capability.h"
# 1 "./include/linux/types.h" 1





# 1 "./include/uapi/linux/types.h" 1




# 1 "./arch/riscv/include/generated/uapi/asm/types.h" 1
# 1 "./include/uapi/asm-generic/types.h" 1






# 1 "./include/asm-generic/int-ll64.h" 1
# 11 "./include/asm-generic/int-ll64.h"
# 1 "./include/uapi/asm-generic/int-ll64.h" 1
# 12 "./include/uapi/asm-generic/int-ll64.h"
# 1 "./arch/riscv/include/uapi/asm/bitsperlong.h" 1
# 12 "./arch/riscv/include/uapi/asm/bitsperlong.h"
# 1 "./include/asm-generic/bitsperlong.h" 1




# 1 "./include/uapi/asm-generic/bitsperlong.h" 1
# 6 "./include/asm-generic/bitsperlong.h" 2
# 13 "./arch/riscv/include/uapi/asm/bitsperlong.h" 2
# 13 "./include/uapi/asm-generic/int-ll64.h" 2







typedef __signed__ char __s8;
typedef unsigned char __u8;

typedef __signed__ short __s16;
typedef unsigned short __u16;

typedef __signed__ int __s32;
typedef unsigned int __u32;


__extension__ typedef __signed__ long long __s64;
__extension__ typedef unsigned long long __u64;
# 12 "./include/asm-generic/int-ll64.h" 2




typedef __s8 s8;
typedef __u8 u8;
typedef __s16 s16;
typedef __u16 u16;
typedef __s32 s32;
typedef __u32 u32;
typedef __s64 s64;
typedef __u64 u64;
# 8 "./include/uapi/asm-generic/types.h" 2
# 2 "./arch/riscv/include/generated/uapi/asm/types.h" 2
# 6 "./include/uapi/linux/types.h" 2








# 1 "./include/uapi/linux/posix_types.h" 1




# 1 "./include/linux/stddef.h" 1




# 1 "./include/uapi/linux/stddef.h" 1
# 6 "./include/linux/stddef.h" 2




enum {
false = 0,
true = 1
};
# 6 "./include/uapi/linux/posix_types.h" 2
# 25 "./include/uapi/linux/posix_types.h"
typedef struct {
unsigned long fds_bits[1024 / (8 * sizeof(long))];
} __kernel_fd_set;


typedef void (*__kernel_sighandler_t)(int);


typedef int __kernel_key_t;
typedef int __kernel_mqd_t;


# 1 "./arch/riscv/include/generated/uapi/asm/posix_types.h" 1
# 1 "./include/uapi/asm-generic/posix_types.h" 1
# 15 "./include/uapi/asm-generic/posix_types.h"
typedef long __kernel_long_t;
typedef unsigned long __kernel_ulong_t;



typedef __kernel_ulong_t __kernel_ino_t;



typedef unsigned int __kernel_mode_t;



typedef int __kernel_pid_t;



typedef int __kernel_ipc_pid_t;



typedef unsigned int __kernel_uid_t;
typedef unsigned int __kernel_gid_t;



typedef __kernel_long_t __kernel_suseconds_t;



typedef int __kernel_daddr_t;



typedef unsigned int __kernel_uid32_t;
typedef unsigned int __kernel_gid32_t;



typedef __kernel_uid_t __kernel_old_uid_t;
typedef __kernel_gid_t __kernel_old_gid_t;



typedef unsigned int __kernel_old_dev_t;
# 72 "./include/uapi/asm-generic/posix_types.h"
typedef __kernel_ulong_t __kernel_size_t;
typedef __kernel_long_t __kernel_ssize_t;
typedef __kernel_long_t __kernel_ptrdiff_t;




typedef struct {
int val[2];
} __kernel_fsid_t;





typedef __kernel_long_t __kernel_off_t;
typedef long long __kernel_loff_t;
typedef __kernel_long_t __kernel_old_time_t;



typedef long long __kernel_time64_t;
typedef __kernel_long_t __kernel_clock_t;
typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;
typedef char * __kernel_caddr_t;
typedef unsigned short __kernel_uid16_t;
typedef unsigned short __kernel_gid16_t;
# 2 "./arch/riscv/include/generated/uapi/asm/posix_types.h" 2
# 37 "./include/uapi/linux/posix_types.h" 2
# 15 "./include/uapi/linux/types.h" 2
# 29 "./include/uapi/linux/types.h"
typedef __u16 __le16;
typedef __u16 __be16;
typedef __u32 __le32;
typedef __u32 __be32;
typedef __u64 __le64;
typedef __u64 __be64;

typedef __u16 __sum16;
typedef __u32 __wsum;
# 52 "./include/uapi/linux/types.h"
typedef unsigned __poll_t;
# 7 "./include/linux/types.h" 2






typedef u32 __kernel_dev_t;

typedef __kernel_fd_set fd_set;
typedef __kernel_dev_t dev_t;
typedef __kernel_ulong_t ino_t;
typedef __kernel_mode_t mode_t;
typedef unsigned short umode_t;
typedef u32 nlink_t;
typedef __kernel_off_t off_t;
typedef __kernel_pid_t pid_t;
typedef __kernel_daddr_t daddr_t;
typedef __kernel_key_t key_t;
typedef __kernel_suseconds_t suseconds_t;
typedef __kernel_timer_t timer_t;
typedef __kernel_clockid_t clockid_t;
typedef __kernel_mqd_t mqd_t;

typedef _Bool bool;

typedef __kernel_uid32_t uid_t;
typedef __kernel_gid32_t gid_t;
typedef __kernel_uid16_t uid16_t;
typedef __kernel_gid16_t gid16_t;

typedef unsigned long uintptr_t;
# 46 "./include/linux/types.h"
typedef __kernel_loff_t loff_t;
# 55 "./include/linux/types.h"
typedef __kernel_size_t size_t;




typedef __kernel_ssize_t ssize_t;




typedef __kernel_ptrdiff_t ptrdiff_t;




typedef __kernel_clock_t clock_t;




typedef __kernel_caddr_t caddr_t;



typedef unsigned char u_char;
typedef unsigned short u_short;
typedef unsigned int u_int;
typedef unsigned long u_long;


typedef unsigned char unchar;
typedef unsigned short ushort;
typedef unsigned int uint;
typedef unsigned long ulong;




typedef u8 u_int8_t;
typedef s8 int8_t;
typedef u16 u_int16_t;
typedef s16 int16_t;
typedef u32 u_int32_t;
typedef s32 int32_t;



typedef u8 uint8_t;
typedef u16 uint16_t;
typedef u32 uint32_t;


typedef u64 uint64_t;
typedef u64 u_int64_t;
typedef s64 int64_t;
# 125 "./include/linux/types.h"
typedef u64 sector_t;
typedef u64 blkcnt_t;
# 143 "./include/linux/types.h"
typedef u64 dma_addr_t;




typedef unsigned int gfp_t;
typedef unsigned int slab_flags_t;
typedef unsigned int fmode_t;


typedef u64 phys_addr_t;




typedef phys_addr_t resource_size_t;





typedef unsigned long irq_hw_number_t;

typedef struct {
int counter;
} atomic_t;




typedef struct {
s64 counter;
} atomic64_t;


struct list_head {
struct list_head *next, *prev;
};

struct hlist_head {
struct hlist_node *first;
};

struct hlist_node {
struct hlist_node *next, **pprev;
};

struct ustat {
__kernel_daddr_t f_tfree;



unsigned long f_tinode;

char f_fname[6];
char f_fpack[6];
};
# 220 "./include/linux/types.h"
struct callback_head {
struct callback_head *next;
void (*func)(struct callback_head *head);
} __attribute__((aligned(sizeof(void *))));


typedef void (*rcu_callback_t)(struct callback_head *head);
typedef void (*call_rcu_func_t)(struct callback_head *head, rcu_callback_t func);

typedef void (*swap_r_func_t)(void *a, void *b, int size, const void *priv);
typedef void (*swap_func_t)(void *a, void *b, int size);

typedef int (*cmp_r_func_t)(const void *a, const void *b, const void *priv);
typedef int (*cmp_func_t)(const void *a, const void *b);
# 18 "./include/uapi/linux/capability.h" 2
# 39 "./include/uapi/linux/capability.h"
typedef struct __user_cap_header_struct {
__u32 version;
int pid;
} *cap_user_header_t;

typedef struct __user_cap_data_struct {
__u32 effective;
__u32 permitted;
__u32 inheritable;
} *cap_user_data_t;
# 72 "./include/uapi/linux/capability.h"
struct vfs_cap_data {
__le32 magic_etc;
struct {
__le32 permitted;
__le32 inheritable;
} data[2];
};




struct vfs_ns_cap_data {
__le32 magic_etc;
struct {
__le32 permitted;
__le32 inheritable;
} data[2];
__le32 rootid;
};
# 17 "./include/linux/capability.h" 2
# 1 "./include/linux/uidgid.h" 1
# 16 "./include/linux/uidgid.h"
# 1 "./include/linux/highuid.h" 1
# 35 "./include/linux/highuid.h"
extern int overflowuid;
extern int overflowgid;

extern void __bad_uid(void);
extern void __bad_gid(void);
# 82 "./include/linux/highuid.h"
extern int fs_overflowuid;
extern int fs_overflowgid;
# 17 "./include/linux/uidgid.h" 2

struct user_namespace;
extern struct user_namespace init_user_ns;

typedef struct {
uid_t val;
} kuid_t;


typedef struct {
gid_t val;
} kgid_t;





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) uid_t __kuid_val(kuid_t uid)
{
return uid.val;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) gid_t __kgid_val(kgid_t gid)
{
return gid.val;
}
# 61 "./include/linux/uidgid.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool uid_eq(kuid_t left, kuid_t right)
{
return __kuid_val(left) == __kuid_val(right);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool gid_eq(kgid_t left, kgid_t right)
{
return __kgid_val(left) == __kgid_val(right);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool uid_gt(kuid_t left, kuid_t right)
{
return __kuid_val(left) > __kuid_val(right);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool gid_gt(kgid_t left, kgid_t right)
{
return __kgid_val(left) > __kgid_val(right);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool uid_gte(kuid_t left, kuid_t right)
{
return __kuid_val(left) >= __kuid_val(right);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool gid_gte(kgid_t left, kgid_t right)
{
return __kgid_val(left) >= __kgid_val(right);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool uid_lt(kuid_t left, kuid_t right)
{
return __kuid_val(left) < __kuid_val(right);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool gid_lt(kgid_t left, kgid_t right)
{
return __kgid_val(left) < __kgid_val(right);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool uid_lte(kuid_t left, kuid_t right)
{
return __kuid_val(left) <= __kuid_val(right);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool gid_lte(kgid_t left, kgid_t right)
{
return __kgid_val(left) <= __kgid_val(right);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool uid_valid(kuid_t uid)
{
return __kuid_val(uid) != (uid_t) -1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool gid_valid(kgid_t gid)
{
return __kgid_val(gid) != (gid_t) -1;
}



extern kuid_t make_kuid(struct user_namespace *from, uid_t uid);
extern kgid_t make_kgid(struct user_namespace *from, gid_t gid);

extern uid_t from_kuid(struct user_namespace *to, kuid_t uid);
extern gid_t from_kgid(struct user_namespace *to, kgid_t gid);
extern uid_t from_kuid_munged(struct user_namespace *to, kuid_t uid);
extern gid_t from_kgid_munged(struct user_namespace *to, kgid_t gid);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool kuid_has_mapping(struct user_namespace *ns, kuid_t uid)
{
return from_kuid(ns, uid) != (uid_t) -1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
{
return from_kgid(ns, gid) != (gid_t) -1;
}
# 18 "./include/linux/capability.h" 2




extern int file_caps_enabled;

typedef struct kernel_cap_struct {
__u32 cap[2];
} kernel_cap_t;


struct cpu_vfs_cap_data {
__u32 magic_etc;
kernel_cap_t permitted;
kernel_cap_t inheritable;
kuid_t rootid;
};





struct file;
struct inode;
struct dentry;
struct task_struct;
struct user_namespace;

extern const kernel_cap_t __cap_empty_set;
extern const kernel_cap_t __cap_init_eff_set;
# 118 "./include/linux/capability.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) kernel_cap_t cap_combine(const kernel_cap_t a,
const kernel_cap_t b)
{
kernel_cap_t dest;
do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] | b.cap[__capi]; } } while (0);
return dest;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) kernel_cap_t cap_intersect(const kernel_cap_t a,
const kernel_cap_t b)
{
kernel_cap_t dest;
do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] & b.cap[__capi]; } } while (0);
return dest;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) kernel_cap_t cap_drop(const kernel_cap_t a,
const kernel_cap_t drop)
{
kernel_cap_t dest;
do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] &~ drop.cap[__capi]; } } while (0);
return dest;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) kernel_cap_t cap_invert(const kernel_cap_t c)
{
kernel_cap_t dest;
do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = ~ c.cap[__capi]; } } while (0);
return dest;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cap_isclear(const kernel_cap_t a)
{
unsigned __capi;
for (__capi = 0; __capi < 2; ++__capi) {
if (a.cap[__capi] != 0)
return false;
}
return true;
}
# 166 "./include/linux/capability.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
{
kernel_cap_t dest;
dest = cap_drop(a, set);
return cap_isclear(dest);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) kernel_cap_t cap_drop_fs_set(const kernel_cap_t a)
{
const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } });
return cap_drop(a, __cap_fs_set);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) kernel_cap_t cap_raise_fs_set(const kernel_cap_t a,
const kernel_cap_t permitted)
{
const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } });
return cap_combine(a,
cap_intersect(permitted, __cap_fs_set));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a)
{
const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((24) & 31)), ((1 << ((32) & 31))) } });
return cap_drop(a, __cap_fs_set);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
const kernel_cap_t permitted)
{
const kernel_cap_t __cap_nfsd_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((24) & 31)), ((1 << ((32) & 31))) } });
return cap_combine(a,
cap_intersect(permitted, __cap_nfsd_set));
}


extern bool has_capability(struct task_struct *t, int cap);
extern bool has_ns_capability(struct task_struct *t,
struct user_namespace *ns, int cap);
extern bool has_capability_noaudit(struct task_struct *t, int cap);
extern bool has_ns_capability_noaudit(struct task_struct *t,
struct user_namespace *ns, int cap);
extern bool capable(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
extern bool ns_capable_noaudit(struct user_namespace *ns, int cap);
extern bool ns_capable_setid(struct user_namespace *ns, int cap);
# 250 "./include/linux/capability.h"
bool privileged_wrt_inode_uidgid(struct user_namespace *ns,
struct user_namespace *mnt_userns,
const struct inode *inode);
bool capable_wrt_inode_uidgid(struct user_namespace *mnt_userns,
const struct inode *inode, int cap);
extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool perfmon_capable(void)
{
return capable(38) || capable(21);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_capable(void)
{
return capable(39) || capable(21);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool checkpoint_restore_ns_capable(struct user_namespace *ns)
{
return ns_capable(ns, 40) ||
ns_capable(ns, 21);
}


int get_vfs_caps_from_disk(struct user_namespace *mnt_userns,
const struct dentry *dentry,
struct cpu_vfs_cap_data *cpu_caps);

int cap_convert_nscap(struct user_namespace *mnt_userns, struct dentry *dentry,
const void **ivalue, size_t size);
# 26 "net/ipv6/route.c" 2
# 1 "./include/linux/errno.h" 1




# 1 "./include/uapi/linux/errno.h" 1
# 1 "./arch/riscv/include/generated/uapi/asm/errno.h" 1
# 1 "./include/uapi/asm-generic/errno.h" 1




# 1 "./include/uapi/asm-generic/errno-base.h" 1
# 6 "./include/uapi/asm-generic/errno.h" 2
# 2 "./arch/riscv/include/generated/uapi/asm/errno.h" 2
# 2 "./include/uapi/linux/errno.h" 2
# 6 "./include/linux/errno.h" 2
# 27 "net/ipv6/route.c" 2
# 1 "./include/linux/export.h" 1
# 72 "./include/linux/export.h"
struct kernel_symbol {
unsigned long value;
const char *name;
const char *namespace;
};
# 28 "net/ipv6/route.c" 2

# 1 "./include/uapi/linux/times.h" 1






struct tms {
__kernel_clock_t tms_utime;
__kernel_clock_t tms_stime;
__kernel_clock_t tms_cutime;
__kernel_clock_t tms_cstime;
};
# 30 "net/ipv6/route.c" 2
# 1 "./include/linux/socket.h" 1





# 1 "./arch/riscv/include/generated/uapi/asm/socket.h" 1
# 1 "./include/uapi/asm-generic/socket.h" 1





# 1 "./arch/riscv/include/generated/uapi/asm/sockios.h" 1
# 1 "./include/uapi/asm-generic/sockios.h" 1
# 2 "./arch/riscv/include/generated/uapi/asm/sockios.h" 2
# 7 "./include/uapi/asm-generic/socket.h" 2
# 2 "./arch/riscv/include/generated/uapi/asm/socket.h" 2
# 7 "./include/linux/socket.h" 2
# 1 "./include/uapi/linux/sockios.h" 1
# 23 "./include/uapi/linux/sockios.h"
# 1 "./arch/riscv/include/generated/uapi/asm/sockios.h" 1
# 24 "./include/uapi/linux/sockios.h" 2
# 8 "./include/linux/socket.h" 2
# 1 "./include/linux/uio.h" 1







# 1 "./include/linux/kernel.h" 1
# 14 "./include/linux/kernel.h"
# 1 "./include/linux/stdarg.h" 1




typedef __builtin_va_list va_list;
# 15 "./include/linux/kernel.h" 2
# 1 "./include/linux/align.h" 1




# 1 "./include/linux/const.h" 1



# 1 "./include/vdso/const.h" 1




# 1 "./include/uapi/linux/const.h" 1
# 6 "./include/vdso/const.h" 2
# 5 "./include/linux/const.h" 2
# 6 "./include/linux/align.h" 2
# 16 "./include/linux/kernel.h" 2
# 1 "./include/linux/limits.h" 1




# 1 "./include/uapi/linux/limits.h" 1
# 6 "./include/linux/limits.h" 2

# 1 "./include/vdso/limits.h" 1
# 8 "./include/linux/limits.h" 2
# 17 "./include/linux/kernel.h" 2
# 1 "./include/linux/linkage.h" 1





# 1 "./include/linux/stringify.h" 1
# 7 "./include/linux/linkage.h" 2

# 1 "./arch/riscv/include/asm/linkage.h" 1
# 9 "./include/linux/linkage.h" 2
# 18 "./include/linux/kernel.h" 2


# 1 "./include/linux/compiler.h" 1
# 232 "./include/linux/compiler.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *offset_to_ptr(const int *off)
{
return (void *)((unsigned long)off + *off);
}
# 248 "./include/linux/compiler.h"
# 1 "./arch/riscv/include/generated/asm/rwonce.h" 1
# 1 "./include/asm-generic/rwonce.h" 1
# 26 "./include/asm-generic/rwonce.h"
# 1 "./include/linux/kasan-checks.h" 1
# 22 "./include/linux/kasan-checks.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __kasan_check_read(const volatile void *p, unsigned int size)
{
return true;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __kasan_check_write(const volatile void *p, unsigned int size)
{
return true;
}
# 40 "./include/linux/kasan-checks.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool kasan_check_read(const volatile void *p, unsigned int size)
{
return true;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool kasan_check_write(const volatile void *p, unsigned int size)
{
return true;
}
# 27 "./include/asm-generic/rwonce.h" 2
# 1 "./include/linux/kcsan-checks.h" 1
# 189 "./include/linux/kcsan-checks.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __kcsan_check_access(const volatile void *ptr, size_t size,
int type) { }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __kcsan_mb(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __kcsan_wmb(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __kcsan_rmb(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __kcsan_release(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kcsan_disable_current(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kcsan_enable_current(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kcsan_enable_current_nowarn(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kcsan_nestable_atomic_begin(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kcsan_nestable_atomic_end(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kcsan_flat_atomic_begin(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kcsan_flat_atomic_end(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kcsan_atomic_next(int n) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kcsan_set_access_mask(unsigned long mask) { }

struct kcsan_scoped_access { };

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
struct kcsan_scoped_access *sa) { return sa; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }
# 229 "./include/linux/kcsan-checks.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kcsan_check_access(const volatile void *ptr, size_t size,
int type) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __kcsan_enable_current(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __kcsan_disable_current(void) { }
# 28 "./include/asm-generic/rwonce.h" 2
# 64 "./include/asm-generic/rwonce.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__))
unsigned long __read_once_word_nocheck(const void *addr)
{
return (*(const volatile typeof( _Generic((*(unsigned long *)addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*(unsigned long *)addr))) *)&(*(unsigned long *)addr));
}
# 82 "./include/asm-generic/rwonce.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__))
unsigned long read_word_at_a_time(const void *addr)
{
kasan_check_read(addr, 1);
return *(unsigned long *)addr;
}
# 2 "./arch/riscv/include/generated/asm/rwonce.h" 2
# 249 "./include/linux/compiler.h" 2
# 21 "./include/linux/kernel.h" 2
# 1 "./include/linux/container_of.h" 1




# 1 "./include/linux/build_bug.h" 1
# 6 "./include/linux/container_of.h" 2
# 1 "./include/linux/err.h" 1







# 1 "./arch/riscv/include/generated/uapi/asm/errno.h" 1
# 9 "./include/linux/err.h" 2
# 24 "./include/linux/err.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void * __attribute__((__warn_unused_result__)) ERR_PTR(long error)
{
return (void *) error;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long __attribute__((__warn_unused_result__)) PTR_ERR( const void *ptr)
{
return (long) ptr;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __attribute__((__warn_unused_result__)) IS_ERR( const void *ptr)
{
return __builtin_expect(!!((unsigned long)(void *)((unsigned long)ptr) >= (unsigned long)-4095), 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __attribute__((__warn_unused_result__)) IS_ERR_OR_NULL( const void *ptr)
{
return __builtin_expect(!!(!ptr), 0) || __builtin_expect(!!((unsigned long)(void *)((unsigned long)ptr) >= (unsigned long)-4095), 0);
}
# 51 "./include/linux/err.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void * __attribute__((__warn_unused_result__)) ERR_CAST( const void *ptr)
{

return (void *) ptr;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) PTR_ERR_OR_ZERO( const void *ptr)
{
if (IS_ERR(ptr))
return PTR_ERR(ptr);
else
return 0;
}
# 7 "./include/linux/container_of.h" 2
# 22 "./include/linux/kernel.h" 2
# 1 "./include/linux/bitops.h" 1




# 1 "./arch/riscv/include/generated/uapi/asm/types.h" 1
# 6 "./include/linux/bitops.h" 2
# 1 "./include/linux/bits.h" 1





# 1 "./include/vdso/bits.h" 1
# 7 "./include/linux/bits.h" 2
# 7 "./include/linux/bitops.h" 2
# 1 "./include/linux/typecheck.h" 1
# 8 "./include/linux/bitops.h" 2

# 1 "./include/uapi/linux/kernel.h" 1




# 1 "./include/uapi/linux/sysinfo.h" 1







struct sysinfo {
__kernel_long_t uptime;
__kernel_ulong_t loads[3];
__kernel_ulong_t totalram;
__kernel_ulong_t freeram;
__kernel_ulong_t sharedram;
__kernel_ulong_t bufferram;
__kernel_ulong_t totalswap;
__kernel_ulong_t freeswap;
__u16 procs;
__u16 pad;
__kernel_ulong_t totalhigh;
__kernel_ulong_t freehigh;
__u32 mem_unit;
char _f[20-2*sizeof(__kernel_ulong_t)-sizeof(__u32)];
};
# 6 "./include/uapi/linux/kernel.h" 2
# 10 "./include/linux/bitops.h" 2
# 24 "./include/linux/bitops.h"
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);






# 1 "./arch/riscv/include/asm/bitops.h" 1
# 14 "./arch/riscv/include/asm/bitops.h"
# 1 "./include/linux/irqflags.h" 1
# 16 "./include/linux/irqflags.h"
# 1 "./arch/riscv/include/asm/irqflags.h" 1
# 10 "./arch/riscv/include/asm/irqflags.h"
# 1 "./arch/riscv/include/asm/processor.h" 1
# 11 "./arch/riscv/include/asm/processor.h"
# 1 "./include/vdso/processor.h" 1
# 10 "./include/vdso/processor.h"
# 1 "./arch/riscv/include/asm/vdso/processor.h" 1






# 1 "./arch/riscv/include/asm/barrier.h" 1
# 72 "./arch/riscv/include/asm/barrier.h"
# 1 "./include/asm-generic/barrier.h" 1
# 18 "./include/asm-generic/barrier.h"
# 1 "./arch/riscv/include/generated/asm/rwonce.h" 1
# 19 "./include/asm-generic/barrier.h" 2
# 73 "./arch/riscv/include/asm/barrier.h" 2
# 8 "./arch/riscv/include/asm/vdso/processor.h" 2

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpu_relax(void)
{

int dummy;

__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));

__asm__ __volatile__("": : :"memory");
}
# 11 "./include/vdso/processor.h" 2
# 12 "./arch/riscv/include/asm/processor.h" 2

# 1 "./arch/riscv/include/asm/ptrace.h" 1








# 1 "./arch/riscv/include/uapi/asm/ptrace.h" 1
# 19 "./arch/riscv/include/uapi/asm/ptrace.h"
struct user_regs_struct {
unsigned long pc;
unsigned long ra;
unsigned long sp;
unsigned long gp;
unsigned long tp;
unsigned long t0;
unsigned long t1;
unsigned long t2;
unsigned long s0;
unsigned long s1;
unsigned long a0;
unsigned long a1;
unsigned long a2;
unsigned long a3;
unsigned long a4;
unsigned long a5;
unsigned long a6;
unsigned long a7;
unsigned long s2;
unsigned long s3;
unsigned long s4;
unsigned long s5;
unsigned long s6;
unsigned long s7;
unsigned long s8;
unsigned long s9;
unsigned long s10;
unsigned long s11;
unsigned long t3;
unsigned long t4;
unsigned long t5;
unsigned long t6;
};

struct __riscv_f_ext_state {
__u32 f[32];
__u32 fcsr;
};

struct __riscv_d_ext_state {
__u64 f[32];
__u32 fcsr;
};

struct __riscv_q_ext_state {
__u64 f[64] __attribute__((aligned(16)));
__u32 fcsr;




__u32 reserved[3];
};

union __riscv_fp_state {
struct __riscv_f_ext_state f;
struct __riscv_d_ext_state d;
struct __riscv_q_ext_state q;
};
# 10 "./arch/riscv/include/asm/ptrace.h" 2
# 1 "./arch/riscv/include/asm/csr.h" 1








# 1 "./arch/riscv/include/asm/asm.h" 1
# 10 "./arch/riscv/include/asm/csr.h" 2
# 11 "./arch/riscv/include/asm/ptrace.h" 2




struct pt_regs {
unsigned long epc;
unsigned long ra;
unsigned long sp;
unsigned long gp;
unsigned long tp;
unsigned long t0;
unsigned long t1;
unsigned long t2;
unsigned long s0;
unsigned long s1;
unsigned long a0;
unsigned long a1;
unsigned long a2;
unsigned long a3;
unsigned long a4;
unsigned long a5;
unsigned long a6;
unsigned long a7;
unsigned long s2;
unsigned long s3;
unsigned long s4;
unsigned long s5;
unsigned long s6;
unsigned long s7;
unsigned long s8;
unsigned long s9;
unsigned long s10;
unsigned long s11;
unsigned long t3;
unsigned long t4;
unsigned long t5;
unsigned long t6;

unsigned long status;
unsigned long badaddr;
unsigned long cause;

unsigned long orig_a0;
};
# 67 "./arch/riscv/include/asm/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long instruction_pointer(struct pt_regs *regs)
{
return regs->epc;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
regs->epc = val;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long user_stack_pointer(struct pt_regs *regs)
{
return regs->sp;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void user_stack_pointer_set(struct pt_regs *regs,
unsigned long val)
{
regs->sp = val;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
return regs->sp;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long frame_pointer(struct pt_regs *regs)
{
return regs->s0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void frame_pointer_set(struct pt_regs *regs,
unsigned long val)
{
regs->s0 = val;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long regs_return_value(struct pt_regs *regs)
{
return regs->a0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void regs_set_return_value(struct pt_regs *regs,
unsigned long val)
{
regs->a0 = val;
}

extern int regs_query_register_offset(const char *name);
extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
unsigned int n);

void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long frame_pointer);
int do_syscall_trace_enter(struct pt_regs *regs);
void do_syscall_trace_exit(struct pt_regs *regs);
# 136 "./arch/riscv/include/asm/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long regs_get_register(struct pt_regs *regs,
unsigned int offset)
{
if (__builtin_expect(!!(offset > __builtin_offsetof(struct pt_regs, orig_a0)), 0))
return 0;

return *(unsigned long *)((unsigned long)regs + offset);
}
# 155 "./arch/riscv/include/asm/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long regs_get_kernel_argument(struct pt_regs *regs,
unsigned int n)
{
static const int nr_reg_arguments = 8;
static const unsigned int argument_offs[] = {
__builtin_offsetof(struct pt_regs, a0),
__builtin_offsetof(struct pt_regs, a1),
__builtin_offsetof(struct pt_regs, a2),
__builtin_offsetof(struct pt_regs, a3),
__builtin_offsetof(struct pt_regs, a4),
__builtin_offsetof(struct pt_regs, a5),
__builtin_offsetof(struct pt_regs, a6),
__builtin_offsetof(struct pt_regs, a7),
};

if (n < nr_reg_arguments)
return regs_get_register(regs, argument_offs[n]);
return 0;
}
# 14 "./arch/riscv/include/asm/processor.h" 2
# 27 "./arch/riscv/include/asm/processor.h"
struct task_struct;
struct pt_regs;


struct thread_struct {

unsigned long ra;
unsigned long sp;
unsigned long s[12];
struct __riscv_d_ext_state fstate;
unsigned long bad_cause;
};


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_thread_struct_whitelist(unsigned long *offset,
unsigned long *size)
{
*offset = __builtin_offsetof(struct thread_struct, fstate);
*size = sizeof((((struct thread_struct *)0)->fstate));
}
# 61 "./arch/riscv/include/asm/processor.h"
extern void start_thread(struct pt_regs *regs,
unsigned long pc, unsigned long sp);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void release_thread(struct task_struct *dead_task)
{
}

extern unsigned long __get_wchan(struct task_struct *p);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void wait_for_interrupt(void)
{
__asm__ __volatile__ ("wfi");
}

struct device_node;
int riscv_of_processor_hartid(struct device_node *node);
int riscv_of_parent_hartid(struct device_node *node);

extern void riscv_fill_hwcap(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
# 11 "./arch/riscv/include/asm/irqflags.h" 2



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long arch_local_save_flags(void)
{
return ({ register unsigned long __v; __asm__ __volatile__ ("csrr %0, " "0x100" : "=r" (__v) : : "memory"); __v; });
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_local_irq_enable(void)
{
({ unsigned long __v = (unsigned long)((0x00000002UL)); __asm__ __volatile__ ("csrs " "0x100" ", %0" : : "rK" (__v) : "memory"); });
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_local_irq_disable(void)
{
({ unsigned long __v = (unsigned long)((0x00000002UL)); __asm__ __volatile__ ("csrc " "0x100" ", %0" : : "rK" (__v) : "memory"); });
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long arch_local_irq_save(void)
{
return ({ unsigned long __v = (unsigned long)((0x00000002UL)); __asm__ __volatile__ ("csrrc %0, " "0x100" ", %1" : "=r" (__v) : "rK" (__v) : "memory"); __v; });
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int arch_irqs_disabled_flags(unsigned long flags)
{
return !(flags & (0x00000002UL));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int arch_irqs_disabled(void)
{
return arch_irqs_disabled_flags(arch_local_save_flags());
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_local_irq_restore(unsigned long flags)
{
({ unsigned long __v = (unsigned long)(flags & (0x00000002UL)); __asm__ __volatile__ ("csrs " "0x100" ", %0" : : "rK" (__v) : "memory"); });
}
# 17 "./include/linux/irqflags.h" 2
# 1 "./arch/riscv/include/generated/asm/percpu.h" 1
# 1 "./include/asm-generic/percpu.h" 1





# 1 "./include/linux/threads.h" 1
# 7 "./include/asm-generic/percpu.h" 2
# 1 "./include/linux/percpu-defs.h" 1
# 308 "./include/linux/percpu-defs.h"
extern void __bad_size_call_parameter(void);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __this_cpu_preempt_check(const char *op) { }
# 8 "./include/asm-generic/percpu.h" 2
# 19 "./include/asm-generic/percpu.h"
extern unsigned long __per_cpu_offset[32];
# 2 "./arch/riscv/include/generated/asm/percpu.h" 2
# 18 "./include/linux/irqflags.h" 2
# 27 "./include/linux/irqflags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void lockdep_softirqs_on(unsigned long ip) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void lockdep_softirqs_off(unsigned long ip) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void lockdep_hardirqs_on_prepare(unsigned long ip) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void lockdep_hardirqs_on(unsigned long ip) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void lockdep_hardirqs_off(unsigned long ip) { }





struct irqtrace_events {
unsigned int irq_events;
unsigned long hardirq_enable_ip;
unsigned long hardirq_disable_ip;
unsigned int hardirq_enable_event;
unsigned int hardirq_disable_event;
unsigned long softirq_disable_ip;
unsigned long softirq_enable_ip;
unsigned int softirq_disable_event;
unsigned int softirq_enable_event;
};

extern __attribute__((section(".data..percpu" ""))) __typeof__(int) hardirqs_enabled;
extern __attribute__((section(".data..percpu" ""))) __typeof__(int) hardirq_context;

extern void trace_hardirqs_on_prepare(void);
extern void trace_hardirqs_off_finish(void);
extern void trace_hardirqs_on(void);
extern void trace_hardirqs_off(void);
# 152 "./include/linux/irqflags.h"
extern void stop_critical_timings(void);
extern void start_critical_timings(void);
# 15 "./arch/riscv/include/asm/bitops.h" 2



# 1 "./include/asm-generic/bitops/__ffs.h" 1




# 1 "./arch/riscv/include/generated/uapi/asm/types.h" 1
# 6 "./include/asm-generic/bitops/__ffs.h" 2







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned long __ffs(unsigned long word)
{
int num = 0;


if ((word & 0xffffffff) == 0) {
num += 32;
word >>= 32;
}

if ((word & 0xffff) == 0) {
num += 16;
word >>= 16;
}
if ((word & 0xff) == 0) {
num += 8;
word >>= 8;
}
if ((word & 0xf) == 0) {
num += 4;
word >>= 4;
}
if ((word & 0x3) == 0) {
num += 2;
word >>= 2;
}
if ((word & 0x1) == 0)
num += 1;
return num;
}
# 19 "./arch/riscv/include/asm/bitops.h" 2
# 1 "./include/asm-generic/bitops/ffz.h" 1
# 20 "./arch/riscv/include/asm/bitops.h" 2
# 1 "./include/asm-generic/bitops/fls.h" 1
# 13 "./include/asm-generic/bitops/fls.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int fls(unsigned int x)
{
int r = 32;

if (!x)
return 0;
if (!(x & 0xffff0000u)) {
x <<= 16;
r -= 16;
}
if (!(x & 0xff000000u)) {
x <<= 8;
r -= 8;
}
if (!(x & 0xf0000000u)) {
x <<= 4;
r -= 4;
}
if (!(x & 0xc0000000u)) {
x <<= 2;
r -= 2;
}
if (!(x & 0x80000000u)) {
x <<= 1;
r -= 1;
}
return r;
}
# 21 "./arch/riscv/include/asm/bitops.h" 2
# 1 "./include/asm-generic/bitops/__fls.h" 1




# 1 "./arch/riscv/include/generated/uapi/asm/types.h" 1
# 6 "./include/asm-generic/bitops/__fls.h" 2







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned long __fls(unsigned long word)
{
int num = 64 - 1;


if (!(word & (~0ul << 32))) {
num -= 32;
word <<= 32;
}

if (!(word & (~0ul << (64 -16)))) {
num -= 16;
word <<= 16;
}
if (!(word & (~0ul << (64 -8)))) {
num -= 8;
word <<= 8;
}
if (!(word & (~0ul << (64 -4)))) {
num -= 4;
word <<= 4;
}
if (!(word & (~0ul << (64 -2)))) {
num -= 2;
word <<= 2;
}
if (!(word & (~0ul << (64 -1))))
num -= 1;
return num;
}
# 22 "./arch/riscv/include/asm/bitops.h" 2
# 1 "./include/asm-generic/bitops/fls64.h" 1




# 1 "./arch/riscv/include/generated/uapi/asm/types.h" 1
# 6 "./include/asm-generic/bitops/fls64.h" 2
# 27 "./include/asm-generic/bitops/fls64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int fls64(__u64 x)
{
if (x == 0)
return 0;
return __fls(x) + 1;
}
# 23 "./arch/riscv/include/asm/bitops.h" 2
# 1 "./include/asm-generic/bitops/sched.h" 1





# 1 "./arch/riscv/include/generated/uapi/asm/types.h" 1
# 7 "./include/asm-generic/bitops/sched.h" 2






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sched_find_first_bit(const unsigned long *b)
{

if (b[0])
return __ffs(b[0]);
return __ffs(b[1]) + 64;
# 30 "./include/asm-generic/bitops/sched.h"
}
# 24 "./arch/riscv/include/asm/bitops.h" 2
# 1 "./include/asm-generic/bitops/ffs.h" 1
# 13 "./include/asm-generic/bitops/ffs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ffs(int x)
{
int r = 1;

if (!x)
return 0;
if (!(x & 0xffff)) {
x >>= 16;
r += 16;
}
if (!(x & 0xff)) {
x >>= 8;
r += 8;
}
if (!(x & 0xf)) {
x >>= 4;
r += 4;
}
if (!(x & 3)) {
x >>= 2;
r += 2;
}
if (!(x & 1)) {
x >>= 1;
r += 1;
}
return r;
}
# 25 "./arch/riscv/include/asm/bitops.h" 2

# 1 "./include/asm-generic/bitops/hweight.h" 1




# 1 "./include/asm-generic/bitops/arch_hweight.h" 1




# 1 "./arch/riscv/include/generated/uapi/asm/types.h" 1
# 6 "./include/asm-generic/bitops/arch_hweight.h" 2

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int __arch_hweight32(unsigned int w)
{
return __sw_hweight32(w);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int __arch_hweight16(unsigned int w)
{
return __sw_hweight16(w);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int __arch_hweight8(unsigned int w)
{
return __sw_hweight8(w);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long __arch_hweight64(__u64 w)
{
return __sw_hweight64(w);
}
# 6 "./include/asm-generic/bitops/hweight.h" 2
# 1 "./include/asm-generic/bitops/const_hweight.h" 1
# 7 "./include/asm-generic/bitops/hweight.h" 2
# 27 "./arch/riscv/include/asm/bitops.h" 2
# 71 "./arch/riscv/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int test_and_set_bit(int nr, volatile unsigned long *addr)
{
return ({ unsigned long __res, __mask; __mask = ((((1UL))) << ((nr) % 64)); __asm__ __volatile__ ( "amo" "or" ".d" ".aqrl" " %0, %2, %1" : "=r" (__res), "+A" (addr[((nr) / 64)]) : "r" ((__mask)) : "memory"); ((__res & __mask) != 0); });
}
# 83 "./arch/riscv/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
return ({ unsigned long __res, __mask; __mask = ((((1UL))) << ((nr) % 64)); __asm__ __volatile__ ( "amo" "and" ".d" ".aqrl" " %0, %2, %1" : "=r" (__res), "+A" (addr[((nr) / 64)]) : "r" ((~(__mask))) : "memory"); ((__res & __mask) != 0); });
}
# 96 "./arch/riscv/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int test_and_change_bit(int nr, volatile unsigned long *addr)
{
return ({ unsigned long __res, __mask; __mask = ((((1UL))) << ((nr) % 64)); __asm__ __volatile__ ( "amo" "xor" ".d" ".aqrl" " %0, %2, %1" : "=r" (__res), "+A" (addr[((nr) / 64)]) : "r" ((__mask)) : "memory"); ((__res & __mask) != 0); });
}
# 113 "./arch/riscv/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_bit(int nr, volatile unsigned long *addr)
{
__asm__ __volatile__ ( "amo" "or" ".d" "" " zero, %1, %0" : "+A" (addr[((nr) / 64)]) : "r" ((((((1UL))) << ((nr) % 64)))) : "memory");;
}
# 127 "./arch/riscv/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_bit(int nr, volatile unsigned long *addr)
{
__asm__ __volatile__ ( "amo" "and" ".d" "" " zero, %1, %0" : "+A" (addr[((nr) / 64)]) : "r" ((~(((((1UL))) << ((nr) % 64))))) : "memory");;
}
# 141 "./arch/riscv/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void change_bit(int nr, volatile unsigned long *addr)
{
__asm__ __volatile__ ( "amo" "xor" ".d" "" " zero, %1, %0" : "+A" (addr[((nr) / 64)]) : "r" ((((((1UL))) << ((nr) % 64)))) : "memory");;
}
# 154 "./arch/riscv/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int test_and_set_bit_lock(
unsigned long nr, volatile unsigned long *addr)
{
return ({ unsigned long __res, __mask; __mask = ((((1UL))) << ((nr) % 64)); __asm__ __volatile__ ( "amo" "or" ".d" ".aq" " %0, %2, %1" : "=r" (__res), "+A" (addr[((nr) / 64)]) : "r" ((__mask)) : "memory"); ((__res & __mask) != 0); });
}
# 167 "./arch/riscv/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_bit_unlock(
unsigned long nr, volatile unsigned long *addr)
{
__asm__ __volatile__ ( "amo" "and" ".d" ".rl" " zero, %1, %0" : "+A" (addr[((nr) / 64)]) : "r" ((~(((((1UL))) << ((nr) % 64))))) : "memory");;
}
# 188 "./arch/riscv/include/asm/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __clear_bit_unlock(
unsigned long nr, volatile unsigned long *addr)
{
clear_bit_unlock(nr, addr);
}








# 1 "./include/asm-generic/bitops/non-atomic.h" 1




# 1 "./arch/riscv/include/generated/uapi/asm/types.h" 1
# 6 "./include/asm-generic/bitops/non-atomic.h" 2
# 16 "./include/asm-generic/bitops/non-atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch___set_bit(unsigned int nr, volatile unsigned long *addr)
{
unsigned long mask = ((((1UL))) << ((nr) % 64));
unsigned long *p = ((unsigned long *)addr) + ((nr) / 64);

*p |= mask;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch___clear_bit(unsigned int nr, volatile unsigned long *addr)
{
unsigned long mask = ((((1UL))) << ((nr) % 64));
unsigned long *p = ((unsigned long *)addr) + ((nr) / 64);

*p &= ~mask;
}
# 45 "./include/asm-generic/bitops/non-atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__))
void arch___change_bit(unsigned int nr, volatile unsigned long *addr)
{
unsigned long mask = ((((1UL))) << ((nr) % 64));
unsigned long *p = ((unsigned long *)addr) + ((nr) / 64);

*p ^= mask;
}
# 64 "./include/asm-generic/bitops/non-atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch___test_and_set_bit(unsigned int nr, volatile unsigned long *addr)
{
unsigned long mask = ((((1UL))) << ((nr) % 64));
unsigned long *p = ((unsigned long *)addr) + ((nr) / 64);
unsigned long old = *p;

*p = old | mask;
return (old & mask) != 0;
}
# 85 "./include/asm-generic/bitops/non-atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch___test_and_clear_bit(unsigned int nr, volatile unsigned long *addr)
{
unsigned long mask = ((((1UL))) << ((nr) % 64));
unsigned long *p = ((unsigned long *)addr) + ((nr) / 64);
unsigned long old = *p;

*p = old & ~mask;
return (old & mask) != 0;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch___test_and_change_bit(unsigned int nr, volatile unsigned long *addr)
{
unsigned long mask = ((((1UL))) << ((nr) % 64));
unsigned long *p = ((unsigned long *)addr) + ((nr) / 64);
unsigned long old = *p;

*p = old ^ mask;
return (old & mask) != 0;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
return 1UL & (addr[((nr) / 64)] >> (nr & (64 -1)));
}
# 201 "./arch/riscv/include/asm/bitops.h" 2
# 1 "./include/asm-generic/bitops/le.h" 1




# 1 "./arch/riscv/include/generated/uapi/asm/types.h" 1
# 6 "./include/asm-generic/bitops/le.h" 2
# 1 "./arch/riscv/include/uapi/asm/byteorder.h" 1
# 10 "./arch/riscv/include/uapi/asm/byteorder.h"
# 1 "./include/linux/byteorder/little_endian.h" 1




# 1 "./include/uapi/linux/byteorder/little_endian.h" 1
# 14 "./include/uapi/linux/byteorder/little_endian.h"
# 1 "./include/linux/swab.h" 1




# 1 "./include/uapi/linux/swab.h" 1







# 1 "./arch/riscv/include/generated/uapi/asm/swab.h" 1
# 1 "./include/uapi/asm-generic/swab.h" 1
# 2 "./arch/riscv/include/generated/uapi/asm/swab.h" 2
# 9 "./include/uapi/linux/swab.h" 2
# 48 "./include/uapi/linux/swab.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__const__)) __u16 __fswab16(__u16 val)
{



return ((__u16)( (((__u16)(val) & (__u16)0x00ffU) << 8) | (((__u16)(val) & (__u16)0xff00U) >> 8)));

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__const__)) __u32 __fswab32(__u32 val)
{



return ((__u32)( (((__u32)(val) & (__u32)0x000000ffUL) << 24) | (((__u32)(val) & (__u32)0x0000ff00UL) << 8) | (((__u32)(val) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(val) & (__u32)0xff000000UL) >> 24)));

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__const__)) __u64 __fswab64(__u64 val)
{







return ((__u64)( (((__u64)(val) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(val) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(val) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(val) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(val) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(val) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(val) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(val) & (__u64)0xff00000000000000ULL) >> 56)));

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__const__)) __u32 __fswahw32(__u32 val)
{



return ((__u32)( (((__u32)(val) & (__u32)0x0000ffffUL) << 16) | (((__u32)(val) & (__u32)0xffff0000UL) >> 16)));

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__const__)) __u32 __fswahb32(__u32 val)
{



return ((__u32)( (((__u32)(val) & (__u32)0x00ff00ffUL) << 8) | (((__u32)(val) & (__u32)0xff00ff00UL) >> 8)));

}
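/*
 * __fswab16/32/64() and __fswahw32/__fswahb32() from
 * include/uapi/linux/swab.h: the byte/word-swap fallbacks that the
 * __swab()/__swabXXp() wrappers below call when the operand is not a
 * compile-time constant.
 */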
# 136 "./include/uapi/linux/swab.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned long __swab(const unsigned long y)
{

return (__builtin_constant_p((__u64)(y)) ? ((__u64)( (((__u64)(y) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(y) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(y) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(y) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(y) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(y) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(y) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(y) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(y));



}
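/*
 * __swab() takes an unsigned long, which is 64 bits under the lp64 ABI,
 * so it folds constant operands via __builtin_constant_p() and otherwise
 * falls through to __fswab64().
 */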
# 171 "./include/uapi/linux/swab.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __u16 __swab16p(const __u16 *p)
{



return (__builtin_constant_p((__u16)(*p)) ? ((__u16)( (((__u16)(*p) & (__u16)0x00ffU) << 8) | (((__u16)(*p) & (__u16)0xff00U) >> 8))) : __fswab16(*p));

}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __u32 __swab32p(const __u32 *p)
{



return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x000000ffUL) << 24) | (((__u32)(*p) & (__u32)0x0000ff00UL) << 8) | (((__u32)(*p) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(*p) & (__u32)0xff000000UL) >> 24))) : __fswab32(*p));

}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __u64 __swab64p(const __u64 *p)
{



return (__builtin_constant_p((__u64)(*p)) ? ((__u64)( (((__u64)(*p) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(*p) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(*p) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(*p) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(*p) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(*p) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(*p) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(*p) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(*p));

}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u32 __swahw32p(const __u32 *p)
{



return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x0000ffffUL) << 16) | (((__u32)(*p) & (__u32)0xffff0000UL) >> 16))) : __fswahw32(*p));

}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u32 __swahb32p(const __u32 *p)
{



return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x00ff00ffUL) << 8) | (((__u32)(*p) & (__u32)0xff00ff00UL) >> 8))) : __fswahb32(*p));

}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __swab16s(__u16 *p)
{



*p = __swab16p(p);

}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __swab32s(__u32 *p)
{



*p = __swab32p(p);

}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __swab64s(__u64 *p)
{



*p = __swab64p(p);

}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __swahw32s(__u32 *p)
{



*p = __swahw32p(p);

}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __swahb32s(__u32 *p)
{



*p = __swahb32p(p);

}
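/*
 * The __swabXXp() helpers above swap a value loaded through a pointer;
 * the __swabXXs()/__swahXXs() helpers swap in place by storing the
 * __swabXXp() result back through the same pointer.
 */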
# 6 "./include/linux/swab.h" 2
# 15 "./include/uapi/linux/byteorder/little_endian.h" 2
# 45 "./include/uapi/linux/byteorder/little_endian.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __le64 __cpu_to_le64p(const __u64 *p)
{
return ( __le64)*p;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __u64 __le64_to_cpup(const __le64 *p)
{
return ( __u64)*p;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __le32 __cpu_to_le32p(const __u32 *p)
{
return ( __le32)*p;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __u32 __le32_to_cpup(const __le32 *p)
{
return ( __u32)*p;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __le16 __cpu_to_le16p(const __u16 *p)
{
return ( __le16)*p;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __u16 __le16_to_cpup(const __le16 *p)
{
return ( __u16)*p;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __be64 __cpu_to_be64p(const __u64 *p)
{
return ( __be64)__swab64p(p);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __u64 __be64_to_cpup(const __be64 *p)
{
return __swab64p((__u64 *)p);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __be32 __cpu_to_be32p(const __u32 *p)
{
return ( __be32)__swab32p(p);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __u32 __be32_to_cpup(const __be32 *p)
{
return __swab32p((__u32 *)p);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __be16 __cpu_to_be16p(const __u16 *p)
{
return ( __be16)__swab16p(p);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __u16 __be16_to_cpup(const __be16 *p)
{
return __swab16p((__u16 *)p);
}
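/*
 * include/uapi/linux/byteorder/little_endian.h: on little-endian RISC-V
 * the __cpu_to_leXXp()/__leXX_to_cpup() conversions are plain casts,
 * while the __cpu_to_beXXp()/__beXX_to_cpup() variants byte-swap via
 * __swabXXp(). Casts such as "( __le64)" are where __force expanded to
 * nothing during preprocessing.
 */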
# 6 "./include/linux/byteorder/little_endian.h" 2





# 1 "./include/linux/byteorder/generic.h" 1
# 144 "./include/linux/byteorder/generic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void le16_add_cpu(__le16 *var, u16 val)
{
*var = (( __le16)(__u16)((( __u16)(__le16)(*var)) + val));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void le32_add_cpu(__le32 *var, u32 val)
{
*var = (( __le32)(__u32)((( __u32)(__le32)(*var)) + val));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void le64_add_cpu(__le64 *var, u64 val)
{
*var = (( __le64)(__u64)((( __u64)(__le64)(*var)) + val));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void le32_to_cpu_array(u32 *buf, unsigned int words)
{
while (words--) {
do { (void)(buf); } while (0);
buf++;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpu_to_le32_array(u32 *buf, unsigned int words)
{
while (words--) {
do { (void)(buf); } while (0);
buf++;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void be16_add_cpu(__be16 *var, u16 val)
{
*var = (( __be16)(__builtin_constant_p((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val))) ? ((__u16)( (((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val)) & (__u16)0x00ffU) << 8) | (((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val)) & (__u16)0xff00U) >> 8))) : __fswab16(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void be32_add_cpu(__be32 *var, u32 val)
{
*var = (( __be32)(__builtin_constant_p((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val))) ? ((__u32)( (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x000000ffUL) << 24) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0xff000000UL) >> 24))) : __fswab32(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void be64_add_cpu(__be64 *var, u64 val)
{
*var = (( __be64)(__builtin_constant_p((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val))) ? ((__u64)( (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
{
size_t i;

for (i = 0; i < len; i++)
dst[i] = (( __be32)(__builtin_constant_p((__u32)((src[i]))) ? ((__u32)( (((__u32)((src[i])) & (__u32)0x000000ffUL) << 24) | (((__u32)((src[i])) & (__u32)0x0000ff00UL) << 8) | (((__u32)((src[i])) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((src[i])) & (__u32)0xff000000UL) >> 24))) : __fswab32((src[i]))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len)
{
size_t i;

for (i = 0; i < len; i++)
dst[i] = (__builtin_constant_p((__u32)(( __u32)(__be32)(src[i]))) ? ((__u32)( (((__u32)(( __u32)(__be32)(src[i])) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(src[i])) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(src[i])) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(src[i])) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(src[i])));
}
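/*
 * include/linux/byteorder/generic.h helpers: the single-statement bodies
 * of be16/be32/be64_add_cpu() are the fully expanded
 * cpu_to_beXX(beXX_to_cpu(*var) + val). le32_to_cpu_array() and
 * cpu_to_le32_array() reduce to "(void)(buf)" because the per-element
 * conversion is a no-op on a little-endian build.
 */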
# 12 "./include/linux/byteorder/little_endian.h" 2
# 11 "./arch/riscv/include/uapi/asm/byteorder.h" 2
# 7 "./include/asm-generic/bitops/le.h" 2
# 19 "./include/asm-generic/bitops/le.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int test_bit_le(int nr, const void *addr)
{
return arch_test_bit(nr ^ 0, addr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_bit_le(int nr, void *addr)
{
set_bit(nr ^ 0, addr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_bit_le(int nr, void *addr)
{
clear_bit(nr ^ 0, addr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __set_bit_le(int nr, void *addr)
{
arch___set_bit(nr ^ 0, addr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __clear_bit_le(int nr, void *addr)
{
arch___clear_bit(nr ^ 0, addr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int test_and_set_bit_le(int nr, void *addr)
{
return test_and_set_bit(nr ^ 0, addr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int test_and_clear_bit_le(int nr, void *addr)
{
return test_and_clear_bit(nr ^ 0, addr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __test_and_set_bit_le(int nr, void *addr)
{
return arch___test_and_set_bit(nr ^ 0, addr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __test_and_clear_bit_le(int nr, void *addr)
{
return arch___test_and_clear_bit(nr ^ 0, addr);
}
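/*
 * Little-endian bitop wrappers from include/asm-generic/bitops/le.h; the
 * "nr ^ 0" pattern is BITOP_LE_SWIZZLE expanding to 0 on little-endian.
 */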
# 202 "./arch/riscv/include/asm/bitops.h" 2
# 1 "./include/asm-generic/bitops/ext2-atomic.h" 1
# 203 "./arch/riscv/include/asm/bitops.h" 2
# 34 "./include/linux/bitops.h" 2

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int get_bitmask_order(unsigned int count)
{
int order;

order = fls(count);
return order;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned long hweight_long(unsigned long w)
{
return sizeof(w) == 4 ? (__builtin_constant_p(w) ? ((((unsigned int) ((!!((w) & (1ULL << 0))) + (!!((w) & (1ULL << 1))) + (!!((w) & (1ULL << 2))) + (!!((w) & (1ULL << 3))) + (!!((w) & (1ULL << 4))) + (!!((w) & (1ULL << 5))) + (!!((w) & (1ULL << 6))) + (!!((w) & (1ULL << 7))))) + ((unsigned int) ((!!(((w) >> 8) & (1ULL << 0))) + (!!(((w) >> 8) & (1ULL << 1))) + (!!(((w) >> 8) & (1ULL << 2))) + (!!(((w) >> 8) & (1ULL << 3))) + (!!(((w) >> 8) & (1ULL << 4))) + (!!(((w) >> 8) & (1ULL << 5))) + (!!(((w) >> 8) & (1ULL << 6))) + (!!(((w) >> 8) & (1ULL << 7)))))) + (((unsigned int) ((!!(((w) >> 16) & (1ULL << 0))) + (!!(((w) >> 16) & (1ULL << 1))) + (!!(((w) >> 16) & (1ULL << 2))) + (!!(((w) >> 16) & (1ULL << 3))) + (!!(((w) >> 16) & (1ULL << 4))) + (!!(((w) >> 16) & (1ULL << 5))) + (!!(((w) >> 16) & (1ULL << 6))) + (!!(((w) >> 16) & (1ULL << 7))))) + ((unsigned int) ((!!((((w) >> 16) >> 8) & (1ULL << 0))) + (!!((((w) >> 16) >> 8) & (1ULL << 1))) + (!!((((w) >> 16) >> 8) & (1ULL << 2))) + (!!((((w) >> 16) >> 8) & (1ULL << 3))) + (!!((((w) >> 16) >> 8) & (1ULL << 4))) + (!!((((w) >> 16) >> 8) & (1ULL << 5))) + (!!((((w) >> 16) >> 8) & (1ULL << 6))) + (!!((((w) >> 16) >> 8) & (1ULL << 7))))))) : __arch_hweight32(w)) : (__builtin_constant_p((__u64)w) ? (((((unsigned int) ((!!(((__u64)w) & (1ULL << 0))) + (!!(((__u64)w) & (1ULL << 1))) + (!!(((__u64)w) & (1ULL << 2))) + (!!(((__u64)w) & (1ULL << 3))) + (!!(((__u64)w) & (1ULL << 4))) + (!!(((__u64)w) & (1ULL << 5))) + (!!(((__u64)w) & (1ULL << 6))) + (!!(((__u64)w) & (1ULL << 7))))) + ((unsigned int) ((!!((((__u64)w) >> 8) & (1ULL << 0))) + (!!((((__u64)w) >> 8) & (1ULL << 1))) + (!!((((__u64)w) >> 8) & (1ULL << 2))) + (!!((((__u64)w) >> 8) & (1ULL << 3))) + (!!((((__u64)w) >> 8) & (1ULL << 4))) + (!!((((__u64)w) >> 8) & (1ULL << 5))) + (!!((((__u64)w) >> 8) & (1ULL << 6))) + (!!((((__u64)w) >> 8) & (1ULL << 7)))))) + (((unsigned int) ((!!((((__u64)w) >> 16) & (1ULL << 0))) + (!!((((__u64)w) >> 16) & (1ULL << 1))) + (!!((((__u64)w) >> 16) & (1ULL << 2))) + (!!((((__u64)w) >> 16) & (1ULL << 3))) + (!!((((__u64)w) >> 16) & (1ULL << 4))) + (!!((((__u64)w) >> 16) & (1ULL << 5))) + (!!((((__u64)w) >> 16) & (1ULL << 6))) + (!!((((__u64)w) >> 16) & (1ULL << 7))))) + ((unsigned int) ((!!(((((__u64)w) >> 16) >> 8) & (1ULL << 0))) + (!!(((((__u64)w) >> 16) >> 8) & (1ULL << 1))) + (!!(((((__u64)w) >> 16) >> 8) & (1ULL << 2))) + (!!(((((__u64)w) >> 16) >> 8) & (1ULL << 3))) + (!!(((((__u64)w) >> 16) >> 8) & (1ULL << 4))) + (!!(((((__u64)w) >> 16) >> 8) & (1ULL << 5))) + (!!(((((__u64)w) >> 16) >> 8) & (1ULL << 6))) + (!!(((((__u64)w) >> 16) >> 8) & (1ULL << 7))))))) + ((((unsigned int) ((!!((((__u64)w) >> 32) & (1ULL << 0))) + (!!((((__u64)w) >> 32) & (1ULL << 1))) + (!!((((__u64)w) >> 32) & (1ULL << 2))) + (!!((((__u64)w) >> 32) & (1ULL << 3))) + (!!((((__u64)w) >> 32) & (1ULL << 4))) + (!!((((__u64)w) >> 32) & (1ULL << 5))) + (!!((((__u64)w) >> 32) & (1ULL << 6))) + (!!((((__u64)w) >> 32) & (1ULL << 7))))) + ((unsigned int) ((!!(((((__u64)w) >> 32) >> 8) & (1ULL << 0))) + (!!(((((__u64)w) >> 32) >> 8) & (1ULL << 1))) + (!!(((((__u64)w) >> 32) >> 8) & (1ULL << 2))) + (!!(((((__u64)w) >> 32) >> 8) & (1ULL << 3))) + (!!(((((__u64)w) >> 32) >> 8) & (1ULL << 4))) + (!!(((((__u64)w) >> 32) >> 8) & (1ULL << 5))) + (!!(((((__u64)w) >> 32) >> 8) & (1ULL << 6))) + (!!(((((__u64)w) >> 32) >> 8) & (1ULL << 7)))))) + (((unsigned int) ((!!(((((__u64)w) >> 32) >> 16) & (1ULL << 0))) + (!!(((((__u64)w) >> 32) >> 16) & (1ULL << 1))) + (!!(((((__u64)w) >> 32) >> 16) & (1ULL << 2))) + (!!(((((__u64)w) >> 32) >> 16) & (1ULL << 3))) + (!!(((((__u64)w) >> 32) >> 16) & (1ULL << 4))) + (!!(((((__u64)w) >> 32) >> 16) & (1ULL << 5))) + (!!(((((__u64)w) >> 32) >> 16) & (1ULL << 6))) + (!!(((((__u64)w) >> 32) >> 16) & (1ULL << 7))))) + ((unsigned int) ((!!((((((__u64)w) >> 32) >> 16) >> 8) & (1ULL << 0))) + (!!((((((__u64)w) >> 32) >> 16) >> 8) & (1ULL << 1))) + (!!((((((__u64)w) >> 32) >> 16) >> 8) & (1ULL << 2))) + (!!((((((__u64)w) >> 32) >> 16) >> 8) & (1ULL << 3))) + (!!((((((__u64)w) >> 32) >> 16) >> 8) & (1ULL << 4))) + (!!((((((__u64)w) >> 32) >> 16) >> 8) & (1ULL << 5))) + (!!((((((__u64)w) >> 32) >> 16) >> 8) & (1ULL << 6))) + (!!((((((__u64)w) >> 32) >> 16) >> 8) & (1ULL << 7)))))))) : __arch_hweight64((__u64)w));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u64 rol64(__u64 word, unsigned int shift)
{
return (word << (shift & 63)) | (word >> ((-shift) & 63));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u64 ror64(__u64 word, unsigned int shift)
{
return (word >> (shift & 63)) | (word << ((-shift) & 63));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u32 rol32(__u32 word, unsigned int shift)
{
return (word << (shift & 31)) | (word >> ((-shift) & 31));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u32 ror32(__u32 word, unsigned int shift)
{
return (word >> (shift & 31)) | (word << ((-shift) & 31));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u16 rol16(__u16 word, unsigned int shift)
{
return (word << (shift & 15)) | (word >> ((-shift) & 15));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u16 ror16(__u16 word, unsigned int shift)
{
return (word >> (shift & 15)) | (word << ((-shift) & 15));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u8 rol8(__u8 word, unsigned int shift)
{
return (word << (shift & 7)) | (word >> ((-shift) & 7));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u8 ror8(__u8 word, unsigned int shift)
{
return (word >> (shift & 7)) | (word << ((-shift) & 7));
}
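/*
 * hweight_long(): the large conditional above is the expansion of
 * hweight32()/hweight64(), which computes the popcount at compile time
 * for constant operands and otherwise calls __arch_hweight32/64(). The
 * rolXX()/rorXX() rotate helpers mask "shift" to the type width so that
 * out-of-range and zero shift counts stay well-defined.
 */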
# 135 "./include/linux/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __s32 sign_extend32(__u32 value, int index)
{
__u8 shift = 31 - index;
return (__s32)(value << shift) >> shift;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __s64 sign_extend64(__u64 value, int index)
{
__u8 shift = 63 - index;
return (__s64)(value << shift) >> shift;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned fls_long(unsigned long l)
{
if (sizeof(l) == 4)
return fls(l);
return fls64(l);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int get_count_order(unsigned int count)
{
if (count == 0)
return -1;

return fls(--count);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int get_count_order_long(unsigned long l)
{
if (l == 0UL)
return -1;
return (int)fls_long(--l);
}
# 188 "./include/linux/bitops.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long __ffs64(u64 word)
{






return __ffs((unsigned long)word);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void assign_bit(long nr, volatile unsigned long *addr,
bool value)
{
if (value)
set_bit(nr, addr);
else
clear_bit(nr, addr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __assign_bit(long nr, volatile unsigned long *addr,
bool value)
{
if (value)
arch___set_bit(nr, addr);
else
arch___clear_bit(nr, addr);
}
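/*
 * assign_bit() uses the atomic set_bit()/clear_bit() pair while
 * __assign_bit() uses the non-atomic arch___ variants; this closes the
 * linux/bitops.h expansion before linux/kernel.h resumes below.
 */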
# 23 "./include/linux/kernel.h" 2
# 1 "./include/linux/kstrtox.h" 1








int __attribute__((__warn_unused_result__)) _kstrtoul(const char *s, unsigned int base, unsigned long *res);
int __attribute__((__warn_unused_result__)) _kstrtol(const char *s, unsigned int base, long *res);

int __attribute__((__warn_unused_result__)) kstrtoull(const char *s, unsigned int base, unsigned long long *res);
int __attribute__((__warn_unused_result__)) kstrtoll(const char *s, unsigned int base, long long *res);
# 30 "./include/linux/kstrtox.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) kstrtoul(const char *s, unsigned int base, unsigned long *res)
{




if (sizeof(unsigned long) == sizeof(unsigned long long) &&
__alignof__(unsigned long) == __alignof__(unsigned long long))
return kstrtoull(s, base, (unsigned long long *)res);
else
return _kstrtoul(s, base, res);
}
# 58 "./include/linux/kstrtox.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) kstrtol(const char *s, unsigned int base, long *res)
{




if (sizeof(long) == sizeof(long long) &&
__alignof__(long) == __alignof__(long long))
return kstrtoll(s, base, (long long *)res);
else
return _kstrtol(s, base, res);
}

int __attribute__((__warn_unused_result__)) kstrtouint(const char *s, unsigned int base, unsigned int *res);
int __attribute__((__warn_unused_result__)) kstrtoint(const char *s, unsigned int base, int *res);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) kstrtou64(const char *s, unsigned int base, u64 *res)
{
return kstrtoull(s, base, res);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) kstrtos64(const char *s, unsigned int base, s64 *res)
{
return kstrtoll(s, base, res);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) kstrtou32(const char *s, unsigned int base, u32 *res)
{
return kstrtouint(s, base, res);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) kstrtos32(const char *s, unsigned int base, s32 *res)
{
return kstrtoint(s, base, res);
}

int __attribute__((__warn_unused_result__)) kstrtou16(const char *s, unsigned int base, u16 *res);
int __attribute__((__warn_unused_result__)) kstrtos16(const char *s, unsigned int base, s16 *res);
int __attribute__((__warn_unused_result__)) kstrtou8(const char *s, unsigned int base, u8 *res);
int __attribute__((__warn_unused_result__)) kstrtos8(const char *s, unsigned int base, s8 *res);
int __attribute__((__warn_unused_result__)) kstrtobool(const char *s, bool *res);

int __attribute__((__warn_unused_result__)) kstrtoull_from_user(const char *s, size_t count, unsigned int base, unsigned long long *res);
int __attribute__((__warn_unused_result__)) kstrtoll_from_user(const char *s, size_t count, unsigned int base, long long *res);
int __attribute__((__warn_unused_result__)) kstrtoul_from_user(const char *s, size_t count, unsigned int base, unsigned long *res);
int __attribute__((__warn_unused_result__)) kstrtol_from_user(const char *s, size_t count, unsigned int base, long *res);
int __attribute__((__warn_unused_result__)) kstrtouint_from_user(const char *s, size_t count, unsigned int base, unsigned int *res);
int __attribute__((__warn_unused_result__)) kstrtoint_from_user(const char *s, size_t count, unsigned int base, int *res);
int __attribute__((__warn_unused_result__)) kstrtou16_from_user(const char *s, size_t count, unsigned int base, u16 *res);
int __attribute__((__warn_unused_result__)) kstrtos16_from_user(const char *s, size_t count, unsigned int base, s16 *res);
int __attribute__((__warn_unused_result__)) kstrtou8_from_user(const char *s, size_t count, unsigned int base, u8 *res);
int __attribute__((__warn_unused_result__)) kstrtos8_from_user(const char *s, size_t count, unsigned int base, s8 *res);
int __attribute__((__warn_unused_result__)) kstrtobool_from_user(const char *s, size_t count, bool *res);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) kstrtou64_from_user(const char *s, size_t count, unsigned int base, u64 *res)
{
return kstrtoull_from_user(s, count, base, res);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) kstrtos64_from_user(const char *s, size_t count, unsigned int base, s64 *res)
{
return kstrtoll_from_user(s, count, base, res);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) kstrtou32_from_user(const char *s, size_t count, unsigned int base, u32 *res)
{
return kstrtouint_from_user(s, count, base, res);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) kstrtos32_from_user(const char *s, size_t count, unsigned int base, s32 *res)
{
return kstrtoint_from_user(s, count, base, res);
}
# 145 "./include/linux/kstrtox.h"
extern unsigned long simple_strtoul(const char *,char **,unsigned int);
extern long simple_strtol(const char *,char **,unsigned int);
extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
extern long long simple_strtoll(const char *,char **,unsigned int);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int strtobool(const char *s, bool *res)
{
return kstrtobool(s, res);
}
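/*
 * include/linux/kstrtox.h: checked string-to-integer parsers. kstrtoul()
 * and kstrtol() forward to the unsigned long long/long long variants
 * because long and long long have identical size and alignment on
 * riscv64, so the _kstrtoul()/_kstrtol() branch is dead code here.
 */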
# 24 "./include/linux/kernel.h" 2
# 1 "./include/linux/log2.h" 1
# 21 "./include/linux/log2.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((const))
int __ilog2_u32(u32 n)
{
return fls(n) - 1;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((const))
int __ilog2_u64(u64 n)
{
return fls64(n) - 1;
}
# 44 "./include/linux/log2.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((const))
bool is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((const))
unsigned long __roundup_pow_of_two(unsigned long n)
{
return 1UL << fls_long(n - 1);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((const))
unsigned long __rounddown_pow_of_two(unsigned long n)
{
return 1UL << (fls_long(n) - 1);
}
# 198 "./include/linux/log2.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__const__))
int __order_base_2(unsigned long n)
{
return n > 1 ? ( __builtin_constant_p(n - 1) ? ((n - 1) < 2 ? 0 : 63 - __builtin_clzll(n - 1)) : (sizeof(n - 1) <= 4) ? __ilog2_u32(n - 1) : __ilog2_u64(n - 1) ) + 1 : 0;
}
# 225 "./include/linux/log2.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((const))
int __bits_per(unsigned long n)
{
if (n < 2)
return 1;
if (is_power_of_2(n))
return ( __builtin_constant_p(n) ? ( ((n) == 0 || (n) == 1) ? 0 : ( __builtin_constant_p((n) - 1) ? (((n) - 1) < 2 ? 0 : 63 - __builtin_clzll((n) - 1)) : (sizeof((n) - 1) <= 4) ? __ilog2_u32((n) - 1) : __ilog2_u64((n) - 1) ) + 1) : __order_base_2(n) ) + 1;
return ( __builtin_constant_p(n) ? ( ((n) == 0 || (n) == 1) ? 0 : ( __builtin_constant_p((n) - 1) ? (((n) - 1) < 2 ? 0 : 63 - __builtin_clzll((n) - 1)) : (sizeof((n) - 1) <= 4) ? __ilog2_u32((n) - 1) : __ilog2_u64((n) - 1) ) + 1) : __order_base_2(n) );
}
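/*
 * include/linux/log2.h: the ternaries inside __order_base_2() and
 * __bits_per() are the expansion of ilog2(), which folds constants with
 * __builtin_clzll() and otherwise dispatches to __ilog2_u32/u64().
 */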
# 25 "./include/linux/kernel.h" 2
# 1 "./include/linux/math.h" 1





# 1 "./arch/riscv/include/generated/asm/div64.h" 1
# 1 "./include/asm-generic/div64.h" 1
# 2 "./arch/riscv/include/generated/asm/div64.h" 2
# 7 "./include/linux/math.h" 2
# 115 "./include/linux/math.h"
struct s16_fract { __s16 numerator; __s16 denominator; };
struct u16_fract { __u16 numerator; __u16 denominator; };
struct s32_fract { __s32 numerator; __s32 denominator; };
struct u32_fract { __u32 numerator; __u32 denominator; };
# 172 "./include/linux/math.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 reciprocal_scale(u32 val, u32 ep_ro)
{
return (u32)(((u64) val * ep_ro) >> 32);
}

u64 int_pow(u64 base, unsigned int exp);
unsigned long int_sqrt(unsigned long);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 int_sqrt64(u64 x)
{
return (u32)int_sqrt(x);
}
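/*
 * include/linux/math.h: reciprocal_scale() maps val into [0, ep_ro) with
 * a 32x32->64 multiply and a shift instead of a division.
 */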
# 26 "./include/linux/kernel.h" 2
# 1 "./include/linux/minmax.h" 1
# 27 "./include/linux/kernel.h" 2

# 1 "./include/linux/panic.h" 1







struct pt_regs;

extern long (*panic_blink)(int state);
__attribute__((__format__(printf, 1, 2)))
void panic(const char *fmt, ...) __attribute__((__noreturn__)) __attribute__((__cold__));
void nmi_panic(struct pt_regs *regs, const char *msg);
extern void oops_enter(void);
extern void oops_exit(void);
extern bool oops_may_print(void);


extern unsigned int sysctl_oops_all_cpu_backtrace;




extern int panic_timeout;
extern unsigned long panic_print;
extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
extern int panic_on_io_nmi;
extern int panic_on_warn;

extern unsigned long panic_on_taint;
extern bool panic_on_taint_nousertaint;

extern int sysctl_panic_on_rcu_stall;
extern int sysctl_max_rcu_stall_to_panic;
extern int sysctl_panic_on_stackoverflow;

extern bool crash_kexec_post_notifiers;






extern atomic_t panic_cpu;






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_arch_panic_timeout(int timeout, int arch_default_timeout)
{
if (panic_timeout == arch_default_timeout)
panic_timeout = timeout;
}
# 80 "./include/linux/panic.h"
struct taint_flag {
char c_true;
char c_false;
bool module;
};

extern const struct taint_flag taint_flags[18];

enum lockdep_ok {
LOCKDEP_STILL_OK,
LOCKDEP_NOW_UNRELIABLE,
};

extern const char *print_tainted(void);
extern void add_taint(unsigned flag, enum lockdep_ok);
extern int test_taint(unsigned flag);
extern unsigned long get_taint(void);
# 29 "./include/linux/kernel.h" 2
# 1 "./include/linux/printk.h" 1





# 1 "./include/linux/init.h" 1
# 116 "./include/linux/init.h"
typedef int (*initcall_t)(void);
typedef void (*exitcall_t)(void);
# 127 "./include/linux/init.h"
typedef initcall_t initcall_entry_t;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) initcall_t initcall_from_entry(initcall_entry_t *entry)
{
return *entry;
}


extern initcall_entry_t __con_initcall_start[], __con_initcall_end[];


typedef void (*ctor_fn_t)(void);

struct file_system_type;


extern int do_one_initcall(initcall_t fn);
extern char __attribute__((__section__(".init.data"))) boot_command_line[];
extern char *saved_command_line;
extern unsigned int reset_devices;


void setup_arch(char **);
void prepare_namespace(void);
void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) init_rootfs(void);
extern struct file_system_type rootfs_fs_type;


extern bool rodata_enabled;


void mark_rodata_ro(void);


extern void (*late_time_init)(void);

extern bool initcall_debug;
# 303 "./include/linux/init.h"
struct obs_kernel_param {
const char *str;
int (*setup_func)(char *);
int early;
};
# 359 "./include/linux/init.h"
void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) parse_early_param(void);
void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) parse_early_options(char *cmdline);
# 7 "./include/linux/printk.h" 2
# 1 "./include/linux/kern_levels.h" 1
# 8 "./include/linux/printk.h" 2

# 1 "./include/linux/cache.h" 1





# 1 "./arch/riscv/include/asm/cache.h" 1
# 7 "./include/linux/cache.h" 2
# 10 "./include/linux/printk.h" 2
# 1 "./include/linux/ratelimit_types.h" 1





# 1 "./include/uapi/linux/param.h" 1




# 1 "./arch/riscv/include/generated/uapi/asm/param.h" 1
# 1 "./include/asm-generic/param.h" 1




# 1 "./include/uapi/asm-generic/param.h" 1
# 6 "./include/asm-generic/param.h" 2
# 2 "./arch/riscv/include/generated/uapi/asm/param.h" 2
# 6 "./include/uapi/linux/param.h" 2
# 7 "./include/linux/ratelimit_types.h" 2
# 1 "./include/linux/spinlock_types_raw.h" 1






# 1 "./arch/riscv/include/asm/spinlock_types.h" 1
# 13 "./arch/riscv/include/asm/spinlock_types.h"
typedef struct {
volatile unsigned int lock;
} arch_spinlock_t;



typedef struct {
volatile unsigned int lock;
} arch_rwlock_t;
# 8 "./include/linux/spinlock_types_raw.h" 2




# 1 "./include/linux/lockdep_types.h" 1
# 17 "./include/linux/lockdep_types.h"
enum lockdep_wait_type {
LD_WAIT_INV = 0,

LD_WAIT_FREE,
LD_WAIT_SPIN,




LD_WAIT_CONFIG = LD_WAIT_SPIN,

LD_WAIT_SLEEP,

LD_WAIT_MAX,
};

enum lockdep_lock_type {
LD_LOCK_NORMAL = 0,
LD_LOCK_PERCPU,
LD_LOCK_MAX,
};
# 69 "./include/linux/lockdep_types.h"
struct lockdep_subclass_key {
char __one_byte;
} __attribute__ ((__packed__));


struct lock_class_key {
union {
struct hlist_node hash_entry;
struct lockdep_subclass_key subkeys[8UL];
};
};

extern struct lock_class_key __lockdep_no_validate__;

struct lock_trace;







struct lock_class {



struct hlist_node hash_entry;






struct list_head lock_entry;






struct list_head locks_after, locks_before;

const struct lockdep_subclass_key *key;
unsigned int subclass;
unsigned int dep_gen_id;




unsigned long usage_mask;
const struct lock_trace *usage_traces[(2*4 + 2)];





int name_version;
const char *name;

u8 wait_type_inner;
u8 wait_type_outer;
u8 lock_type;






} ;
# 176 "./include/linux/lockdep_types.h"
struct lockdep_map {
struct lock_class_key *key;
struct lock_class *class_cache[2];
const char *name;
u8 wait_type_outer;
u8 wait_type_inner;
u8 lock_type;





};

struct pin_cookie { unsigned int val; };
# 13 "./include/linux/spinlock_types_raw.h" 2

typedef struct raw_spinlock {
arch_spinlock_t raw_lock;

unsigned int magic, owner_cpu;
void *owner;


struct lockdep_map dep_map;

} raw_spinlock_t;
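/*
 * raw_spinlock_t carries the magic/owner_cpu/owner debug fields plus a
 * lockdep_map, which suggests CONFIG_DEBUG_SPINLOCK and
 * CONFIG_DEBUG_LOCK_ALLOC were enabled in the configuration used to
 * generate this file.
 */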
# 8 "./include/linux/ratelimit_types.h" 2







struct ratelimit_state {
raw_spinlock_t lock;

int interval;
int burst;
int printed;
int missed;
unsigned long begin;
unsigned long flags;
};
# 40 "./include/linux/ratelimit_types.h"
extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
# 11 "./include/linux/printk.h" 2
# 1 "./include/linux/once_lite.h" 1
# 12 "./include/linux/printk.h" 2

extern const char linux_banner[];
extern const char linux_proc_banner[];

extern int oops_in_progress;



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int printk_get_level(const char *buffer)
{
if (buffer[0] == '\001' && buffer[1]) {
switch (buffer[1]) {
case '0' ... '7':
case 'c':
return buffer[1];
}
}
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *printk_skip_level(const char *buffer)
{
if (printk_get_level(buffer))
return buffer + 2;

return buffer;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *printk_skip_headers(const char *buffer)
{
while (printk_get_level(buffer))
buffer = printk_skip_level(buffer);

return buffer;
}
# 66 "./include/linux/printk.h"
extern int console_printk[];






extern void console_verbose(void);



extern char devkmsg_log_str[];
struct ctl_table;

extern int suppress_printk;

struct va_format {
const char *fmt;
va_list *va;
};
# 140 "./include/linux/printk.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__format__(printf, 1, 2))) __attribute__((__cold__))
void early_printk(const char *s, ...) { }


struct dev_printk_info;


__attribute__((__format__(printf, 4, 0)))
int vprintk_emit(int facility, int level,
const struct dev_printk_info *dev_info,
const char *fmt, va_list args);

__attribute__((__format__(printf, 1, 0)))
int vprintk(const char *fmt, va_list args);

__attribute__((__format__(printf, 1, 2))) __attribute__((__cold__))
int _printk(const char *fmt, ...);




__attribute__((__format__(printf, 1, 2))) __attribute__((__cold__)) int _printk_deferred(const char *fmt, ...);

extern void __printk_safe_enter(void);
extern void __printk_safe_exit(void);
# 178 "./include/linux/printk.h"
extern int __printk_ratelimit(const char *func);

extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
unsigned int interval_msec);

extern int printk_delay_msec;
extern int dmesg_restrict;

extern void wake_up_klogd(void);

char *log_buf_addr_get(void);
u32 log_buf_len_get(void);
void log_buf_vmcoreinfo_setup(void);
void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) setup_log_buf(int early);
__attribute__((__format__(printf, 1, 2))) void dump_stack_set_arch_desc(const char *fmt, ...);
void dump_stack_print_info(const char *log_lvl);
void show_regs_print_info(const char *log_lvl);
extern void dump_stack_lvl(const char *log_lvl) __attribute__((__cold__));
extern void dump_stack(void) __attribute__((__cold__));
void printk_trigger_flush(void);
# 280 "./include/linux/printk.h"
extern int __printk_cpu_trylock(void);
extern void __printk_wait_on_cpu_lock(void);
extern void __printk_cpu_unlock(void);
# 320 "./include/linux/printk.h"
extern int kptr_restrict;
# 339 "./include/linux/printk.h"
struct module;
# 693 "./include/linux/printk.h"
extern const struct file_operations kmsg_fops;

enum {
DUMP_PREFIX_NONE,
DUMP_PREFIX_ADDRESS,
DUMP_PREFIX_OFFSET
};
extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
int groupsize, char *linebuf, size_t linebuflen,
bool ascii);

extern void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
const void *buf, size_t len, bool ascii);
# 732 "./include/linux/printk.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void print_hex_dump_debug(const char *prefix_str, int prefix_type,
int rowsize, int groupsize,
const void *buf, size_t len, bool ascii)
{
}
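/*
 * End of the include/linux/printk.h expansion; the extern prototypes for
 * vprintk()/_printk() are consistent with a CONFIG_PRINTK=y build, while
 * early_printk() is the empty stub variant.
 */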
# 30 "./include/linux/kernel.h" 2

# 1 "./include/linux/static_call_types.h" 1
# 32 "./include/linux/static_call_types.h"
struct static_call_site {
s32 addr;
s32 key;
};
# 94 "./include/linux/static_call_types.h"
struct static_call_key {
void *func;
};
# 32 "./include/linux/kernel.h" 2
# 1 "./include/linux/instruction_pointer.h" 1
# 33 "./include/linux/kernel.h" 2
# 94 "./include/linux/kernel.h"
struct completion;
struct user;



extern int __cond_resched(void);
# 125 "./include/linux/kernel.h"
extern void __might_resched(const char *file, int line, unsigned int offsets);
extern void __might_sleep(const char *file, int line);
extern void __cant_sleep(const char *file, int line, int preempt_offset);
extern void __cant_migrate(const char *file, int line);
# 198 "./include/linux/kernel.h"
void __might_fault(const char *file, int line);




void do_exit(long error_code) __attribute__((__noreturn__));

extern int num_to_str(char *buf, int size,
unsigned long long num, unsigned int width);



extern __attribute__((__format__(printf, 2, 3))) int sprintf(char *buf, const char * fmt, ...);
extern __attribute__((__format__(printf, 2, 0))) int vsprintf(char *buf, const char *, va_list);
extern __attribute__((__format__(printf, 3, 4)))
int snprintf(char *buf, size_t size, const char *fmt, ...);
extern __attribute__((__format__(printf, 3, 0)))
int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
extern __attribute__((__format__(printf, 3, 4)))
int scnprintf(char *buf, size_t size, const char *fmt, ...);
extern __attribute__((__format__(printf, 3, 0)))
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
extern __attribute__((__format__(printf, 2, 3))) __attribute__((__malloc__))
char *kasprintf(gfp_t gfp, const char *fmt, ...);
extern __attribute__((__format__(printf, 2, 0))) __attribute__((__malloc__))
char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
extern __attribute__((__format__(printf, 2, 0)))
const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);

extern __attribute__((__format__(scanf, 2, 3)))
int sscanf(const char *, const char *, ...);
extern __attribute__((__format__(scanf, 2, 0)))
int vsscanf(const char *, const char *, va_list);

extern int no_hash_pointers_enable(char *str);

extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(const char *ptr, char **retptr);
extern bool parse_option_str(const char *str, const char *option);
extern char *next_arg(char *args, char **param, char **val);

extern int core_kernel_text(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);

extern void bust_spinlocks(int yes);

extern int root_mountflags;

extern bool early_boot_irqs_disabled;





extern enum system_states {
SYSTEM_BOOTING,
SYSTEM_SCHEDULING,
SYSTEM_FREEING_INITMEM,
SYSTEM_RUNNING,
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
SYSTEM_SUSPEND,
} system_state;

extern const char hex_asc[];



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) char *hex_byte_pack(char *buf, u8 byte)
{
*buf++ = hex_asc[((byte) & 0xf0) >> 4];
*buf++ = hex_asc[((byte) & 0x0f)];
return buf;
}

extern const char hex_asc_upper[];



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) char *hex_byte_pack_upper(char *buf, u8 byte)
{
*buf++ = hex_asc_upper[((byte) & 0xf0) >> 4];
*buf++ = hex_asc_upper[((byte) & 0x0f)];
return buf;
}

extern int hex_to_bin(unsigned char ch);
extern int __attribute__((__warn_unused_result__)) hex2bin(u8 *dst, const char *src, size_t count);
extern char *bin2hex(char *dst, const void *src, size_t count);

bool mac_pton(const char *s, u8 *mac);
# 314 "./include/linux/kernel.h"
enum ftrace_dump_mode {
DUMP_NONE,
DUMP_ALL,
DUMP_ORIG,
};


void tracing_on(void);
void tracing_off(void);
int tracing_is_on(void);
void tracing_snapshot(void);
void tracing_snapshot_alloc(void);

extern void tracing_start(void);
extern void tracing_stop(void);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__format__(printf, 1, 2)))
void ____trace_printk_check_format(const char *fmt, ...)
{
}
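/*
 * Note: ____trace_printk_check_format() is intentionally empty; it
 * appears to exist only so the printf format attribute above lets the
 * compiler type-check trace_printk() format strings at build time.
 */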
# 393 "./include/linux/kernel.h"
extern __attribute__((__format__(printf, 2, 3)))
int __trace_bprintk(unsigned long ip, const char *fmt, ...);

extern __attribute__((__format__(printf, 2, 3)))
int __trace_printk(unsigned long ip, const char *fmt, ...);
# 434 "./include/linux/kernel.h"
extern int __trace_bputs(unsigned long ip, const char *str);
extern int __trace_puts(unsigned long ip, const char *str, int size);

extern void trace_dump_stack(int skip);
# 456 "./include/linux/kernel.h"
extern __attribute__((__format__(printf, 2, 0))) int
__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);

extern __attribute__((__format__(printf, 2, 0))) int
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);

extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
# 9 "./include/linux/uio.h" 2
# 1 "./include/linux/thread_info.h" 1
# 13 "./include/linux/thread_info.h"
# 1 "./include/linux/bug.h" 1




# 1 "./arch/riscv/include/asm/bug.h" 1
# 30 "./arch/riscv/include/asm/bug.h"
typedef u32 bug_insn_t;
# 83 "./arch/riscv/include/asm/bug.h"
# 1 "./include/asm-generic/bug.h" 1





# 1 "./include/linux/instrumentation.h" 1
# 7 "./include/asm-generic/bug.h" 2
# 24 "./include/asm-generic/bug.h"
struct warn_args;
struct pt_regs;

void __warn(const char *file, int line, void *caller, unsigned taint,
struct pt_regs *regs, struct warn_args *args);




struct bug_entry {



signed int bug_addr_disp;





signed int file_disp;

unsigned short line;

unsigned short flags;
};
# 101 "./include/asm-generic/bug.h"
extern __attribute__((__format__(printf, 1, 2))) void __warn_printk(const char *fmt, ...);
# 84 "./arch/riscv/include/asm/bug.h" 2

struct pt_regs;
struct task_struct;

void __show_regs(struct pt_regs *regs);
void die(struct pt_regs *regs, const char *str);
void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr);
# 6 "./include/linux/bug.h" 2



enum bug_trap_type {
BUG_TRAP_TYPE_NONE = 0,
BUG_TRAP_TYPE_WARN = 1,
BUG_TRAP_TYPE_BUG = 2,
};

struct pt_regs;
# 34 "./include/linux/bug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int is_warning_bug(const struct bug_entry *bug)
{
return bug->flags & (1 << 0);
}
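/*
 * Note: (1 << 0) here is presumably the expanded BUGFLAG_WARNING bit,
 * letting is_warning_bug() distinguish WARN()-style bug-table entries
 * from hard BUG()s.
 */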

void bug_get_file_line(struct bug_entry *bug, const char **file,
unsigned int *line);

struct bug_entry *find_bug(unsigned long bugaddr);

enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);


int is_valid_bugaddr(unsigned long addr);

void generic_bug_clear_once(void);
# 80 "./include/linux/bug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__)) bool check_data_corruption(bool v) { return v; }
# 14 "./include/linux/thread_info.h" 2
# 1 "./include/linux/restart_block.h" 1
# 10 "./include/linux/restart_block.h"
# 1 "./include/linux/time64.h" 1




# 1 "./include/linux/math64.h" 1






# 1 "./include/vdso/math64.h" 1




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
u32 ret = 0;

while (dividend >= divisor) {


asm("" : "+rm"(dividend));

dividend -= divisor;
ret++;
}

*remainder = dividend;

return ret;
}
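/*
 * Note: __iter_div_u64_rem() divides by repeated subtraction; the empty
 * asm("" : "+rm"(dividend)) acts as a compiler barrier so the optimizer
 * cannot collapse the loop back into a hardware 64-bit division, which
 * is the point of this variant for callers whose quotient is expected
 * to be tiny (e.g. the nanosecond normalization in timespec64_add_ns()
 * further down).
 */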
# 8 "./include/linux/math64.h" 2
# 1 "./arch/riscv/include/generated/asm/div64.h" 1
# 9 "./include/linux/math64.h" 2
# 26 "./include/linux/math64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
*remainder = dividend % divisor;
return dividend / divisor;
}
# 40 "./include/linux/math64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
*remainder = dividend % divisor;
return dividend / divisor;
}
# 54 "./include/linux/math64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
*remainder = dividend % divisor;
return dividend / divisor;
}
# 67 "./include/linux/math64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 div64_u64(u64 dividend, u64 divisor)
{
return dividend / divisor;
}
# 79 "./include/linux/math64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s64 div64_s64(s64 dividend, s64 divisor)
{
return dividend / divisor;
}
# 125 "./include/linux/math64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 div_u64(u64 dividend, u32 divisor)
{
u32 remainder;
return div_u64_rem(dividend, divisor, &remainder);
}
# 138 "./include/linux/math64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s64 div_s64(s64 dividend, s32 divisor)
{
s32 remainder;
return div_s64_rem(dividend, divisor, &remainder);
}
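/*
 * Note: on rv64 these div_* helpers reduce to native 64-bit division;
 * the *_rem variants return the quotient and store the remainder
 * through the pointer argument, e.g. div_u64_rem(10, 3, &r) yields 3
 * with r == 1.
 */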


u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 mul_u32_u32(u32 a, u32 b)
{
return (u64)a * b;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
return (u64)(((unsigned __int128)a * mul) >> shift);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
return (u64)(((unsigned __int128)a * mul) >> shift);
}
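/*
 * Note: both *_shr helpers widen to unsigned __int128 so the full
 * 128-bit product is formed before the right shift; nothing is lost to
 * 64-bit overflow as long as the shifted result itself fits in a u64.
 */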
# 239 "./include/linux/math64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 mul_s64_u64_shr(s64 a, u64 b, unsigned int shift)
{
u64 ret;





ret = mul_u64_u64_shr(__builtin_choose_expr( __builtin_types_compatible_p(typeof(a), signed long long) || __builtin_types_compatible_p(typeof(a), unsigned long long), ({ signed long long __x = (a); __x < 0 ? -__x : __x; }), __builtin_choose_expr( __builtin_types_compatible_p(typeof(a), signed long) || __builtin_types_compatible_p(typeof(a), unsigned long), ({ signed long __x = (a); __x < 0 ? -__x : __x; }), __builtin_choose_expr( __builtin_types_compatible_p(typeof(a), signed int) || __builtin_types_compatible_p(typeof(a), unsigned int), ({ signed int __x = (a); __x < 0 ? -__x : __x; }), __builtin_choose_expr( __builtin_types_compatible_p(typeof(a), signed short) || __builtin_types_compatible_p(typeof(a), unsigned short), ({ signed short __x = (a); __x < 0 ? -__x : __x; }), __builtin_choose_expr( __builtin_types_compatible_p(typeof(a), signed char) || __builtin_types_compatible_p(typeof(a), unsigned char), ({ signed char __x = (a); __x < 0 ? -__x : __x; }), __builtin_choose_expr( __builtin_types_compatible_p(typeof(a), char), (char)({ signed char __x = (a); __x<0?-__x:__x; }), ((void)0))))))), b, shift);

if (a < 0)
ret = -((s64) ret);

return ret;
}
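/*
 * Note: the __builtin_choose_expr ladder above appears to be the
 * expanded abs() macro: mul_s64_u64_shr() multiplies |a| * b with the
 * unsigned helper, then negates the result when a was negative.
 */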



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
union {
u64 ll;
struct {



u32 low, high;

} l;
} u, rl, rh;

u.ll = a;
rl.ll = mul_u32_u32(u.l.low, mul);
rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;


rl.l.high = ({ uint32_t __base = (divisor); uint32_t __rem; __rem = ((uint64_t)(rh.ll)) % __base; (rh.ll) = ((uint64_t)(rh.ll)) / __base; __rem; });


({ uint32_t __base = (divisor); uint32_t __rem; __rem = ((uint64_t)(rl.ll)) % __base; (rl.ll) = ((uint64_t)(rl.ll)) / __base; __rem; });

rl.l.high = rh.l.low;
return rl.ll;
}
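/*
 * Note: mul_u64_u32_div() computes (a * mul) / divisor without a
 * 128-bit type: it splits a into 32-bit halves, forms the two partial
 * products, then long-divides the high and low halves in turn. The
 * ({ uint32_t __base = ...; ... }) statement expressions are the
 * expanded do_div(), which divides in place and evaluates to the
 * remainder.
 */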


u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
# 6 "./include/linux/time64.h" 2
# 1 "./include/vdso/time64.h" 1
# 7 "./include/linux/time64.h" 2

typedef __s64 time64_t;
typedef __u64 timeu64_t;


# 1 "./include/uapi/linux/time.h" 1





# 1 "./include/uapi/linux/time_types.h" 1






struct __kernel_timespec {
__kernel_time64_t tv_sec;
long long tv_nsec;
};

struct __kernel_itimerspec {
struct __kernel_timespec it_interval;
struct __kernel_timespec it_value;
};
# 25 "./include/uapi/linux/time_types.h"
struct __kernel_old_timeval {
__kernel_long_t tv_sec;
__kernel_long_t tv_usec;
};


struct __kernel_old_timespec {
__kernel_old_time_t tv_sec;
long tv_nsec;
};

struct __kernel_old_itimerval {
struct __kernel_old_timeval it_interval;
struct __kernel_old_timeval it_value;
};

struct __kernel_sock_timeval {
__s64 tv_sec;
__s64 tv_usec;
};
# 7 "./include/uapi/linux/time.h" 2
# 33 "./include/uapi/linux/time.h"
struct timezone {
int tz_minuteswest;
int tz_dsttime;
};
# 12 "./include/linux/time64.h" 2

struct timespec64 {
time64_t tv_sec;
long tv_nsec;
};

struct itimerspec64 {
struct timespec64 it_interval;
struct timespec64 it_value;
};
# 43 "./include/linux/time64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int timespec64_equal(const struct timespec64 *a,
const struct timespec64 *b)
{
return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs)
{
if (lhs->tv_sec < rhs->tv_sec)
return -1;
if (lhs->tv_sec > rhs->tv_sec)
return 1;
return lhs->tv_nsec - rhs->tv_nsec;
}

extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct timespec64 timespec64_add(struct timespec64 lhs,
struct timespec64 rhs)
{
struct timespec64 ts_delta;
set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec,
lhs.tv_nsec + rhs.tv_nsec);
return ts_delta;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct timespec64 timespec64_sub(struct timespec64 lhs,
struct timespec64 rhs)
{
struct timespec64 ts_delta;
set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec,
lhs.tv_nsec - rhs.tv_nsec);
return ts_delta;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool timespec64_valid(const struct timespec64 *ts)
{

if (ts->tv_sec < 0)
return false;

if ((unsigned long)ts->tv_nsec >= 1000000000L)
return false;
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool timespec64_valid_strict(const struct timespec64 *ts)
{
if (!timespec64_valid(ts))
return false;

if ((unsigned long long)ts->tv_sec >= (((s64)~((u64)1 << 63)) / 1000000000L))
return false;
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool timespec64_valid_settod(const struct timespec64 *ts)
{
if (!timespec64_valid(ts))
return false;

if ((unsigned long long)ts->tv_sec >= ((((s64)~((u64)1 << 63)) / 1000000000L) - (30LL * 365 * 24 *3600)))
return false;
return true;
}
# 127 "./include/linux/time64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s64 timespec64_to_ns(const struct timespec64 *ts)
{

if (ts->tv_sec >= (((s64)~((u64)1 << 63)) / 1000000000L))
return ((s64)~((u64)1 << 63));

if (ts->tv_sec <= ((-((s64)~((u64)1 << 63)) - 1) / 1000000000L))
return (-((s64)~((u64)1 << 63)) - 1);

return ((s64) ts->tv_sec * 1000000000L) + ts->tv_nsec;
}
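/*
 * Note: ~((u64)1 << 63) is 0x7fffffffffffffff, i.e. S64_MAX, so the
 * repeated (((s64)~((u64)1 << 63)) / 1000000000L) literal is presumably
 * the expanded KTIME_SEC_MAX; timespec64_to_ns() saturates at
 * KTIME_MAX/KTIME_MIN rather than letting the s64 nanosecond count
 * wrap.
 */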







extern struct timespec64 ns_to_timespec64(const s64 nsec);
# 155 "./include/linux/time64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void timespec64_add_ns(struct timespec64 *a, u64 ns)
{
a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, 1000000000L, &ns);
a->tv_nsec = ns;
}





extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
const struct timespec64 rhs);
# 11 "./include/linux/restart_block.h" 2

struct timespec;
struct old_timespec32;
struct pollfd;

enum timespec_type {
TT_NONE = 0,
TT_NATIVE = 1,
TT_COMPAT = 2,
};




struct restart_block {
unsigned long arch_data;
long (*fn)(struct restart_block *);
union {

struct {
u32 *uaddr;
u32 val;
u32 flags;
u32 bitset;
u64 time;
u32 *uaddr2;
} futex;

struct {
clockid_t clockid;
enum timespec_type type;
union {
struct __kernel_timespec *rmtp;
struct old_timespec32 *compat_rmtp;
};
u64 expires;
} nanosleep;

struct {
struct pollfd *ufds;
int nfds;
int has_timeout;
unsigned long tv_sec;
unsigned long tv_nsec;
} poll;
};
};

extern long do_no_restart_syscall(struct restart_block *parm);
# 15 "./include/linux/thread_info.h" 2








# 1 "./arch/riscv/include/asm/current.h" 1
# 18 "./arch/riscv/include/asm/current.h"
struct task_struct;

register struct task_struct *riscv_current_is_tp __asm__("tp");
# 29 "./arch/riscv/include/asm/current.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) struct task_struct *get_current(void)
{
return riscv_current_is_tp;
}
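/*
 * Note: riscv pins the current task_struct pointer in the tp register
 * via the global register variable above, so get_current() is a single
 * register read; current_stack_pointer below does the same for sp.
 */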



register unsigned long current_stack_pointer __asm__("sp");
# 24 "./include/linux/thread_info.h" 2
# 33 "./include/linux/thread_info.h"
enum {
BAD_STACK = -1,
NOT_STACK = 0,
GOOD_FRAME,
GOOD_STACK,
};
# 60 "./include/linux/thread_info.h"
# 1 "./arch/riscv/include/asm/thread_info.h" 1
# 11 "./arch/riscv/include/asm/thread_info.h"
# 1 "./arch/riscv/include/asm/page.h" 1
# 12 "./arch/riscv/include/asm/page.h"
# 1 "./include/linux/pfn.h" 1
# 13 "./include/linux/pfn.h"
typedef struct {
u64 val;
} pfn_t;
# 13 "./arch/riscv/include/asm/page.h" 2
# 64 "./arch/riscv/include/asm/page.h"
typedef struct {
unsigned long pgd;
} pgd_t;


typedef struct {
unsigned long pte;
} pte_t;

typedef struct {
unsigned long pgprot;
} pgprot_t;

typedef struct page *pgtable_t;
# 94 "./arch/riscv/include/asm/page.h"
extern unsigned long riscv_pfn_base;





struct kernel_mapping {
unsigned long page_offset;
unsigned long virt_addr;
uintptr_t phys_addr;
uintptr_t size;

unsigned long va_pa_offset;

unsigned long va_kernel_pa_offset;
unsigned long va_kernel_xip_pa_offset;




};

extern struct kernel_mapping kernel_map;
extern phys_addr_t phys_ram_base;
# 189 "./arch/riscv/include/asm/page.h"
# 1 "./include/asm-generic/memory_model.h" 1
# 190 "./arch/riscv/include/asm/page.h" 2
# 1 "./include/asm-generic/getorder.h" 1
# 29 "./include/asm-generic/getorder.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((__const__)) int get_order(unsigned long size)
{
if (__builtin_constant_p(size)) {
if (!size)
return 64 - (12);

if (size < (1UL << (12)))
return 0;

return ( __builtin_constant_p((size) - 1) ? (((size) - 1) < 2 ? 0 : 63 - __builtin_clzll((size) - 1)) : (sizeof((size) - 1) <= 4) ? __ilog2_u32((size) - 1) : __ilog2_u64((size) - 1) ) - (12) + 1;
}

size--;
size >>= (12);



return fls64(size);

}
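/*
 * Note: get_order() returns the page allocation order for a byte size;
 * the bare 12 is presumably the expanded PAGE_SHIFT (4 KiB pages), so
 * get_order(8192) == 1. The __builtin_constant_p branch lets the whole
 * computation fold away for compile-time-constant sizes.
 */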
# 191 "./arch/riscv/include/asm/page.h" 2
# 12 "./arch/riscv/include/asm/thread_info.h" 2
# 56 "./arch/riscv/include/asm/thread_info.h"
struct thread_info {
unsigned long flags;
int preempt_count;





long kernel_sp;
long user_sp;
int cpu;
};
# 61 "./include/linux/thread_info.h" 2







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long set_restart_fn(struct restart_block *restart,
long (*fn)(struct restart_block *))
{
restart->fn = fn;
do { } while (0);
return -516;
}
# 87 "./include/linux/thread_info.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_ti_thread_flag(struct thread_info *ti, int flag)
{
set_bit(flag, (unsigned long *)&ti->flags);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_ti_thread_flag(struct thread_info *ti, int flag)
{
clear_bit(flag, (unsigned long *)&ti->flags);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void update_ti_thread_flag(struct thread_info *ti, int flag,
bool value)
{
if (value)
set_ti_thread_flag(ti, flag);
else
clear_ti_thread_flag(ti, flag);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
{
return test_and_set_bit(flag, (unsigned long *)&ti->flags);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
{
return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int test_ti_thread_flag(struct thread_info *ti, int flag)
{
return arch_test_bit(flag, (unsigned long *)&ti->flags);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned long read_ti_thread_flags(struct thread_info *ti)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_0(void) ; if (!((sizeof(ti->flags) == sizeof(char) || sizeof(ti->flags) == sizeof(short) || sizeof(ti->flags) == sizeof(int) || sizeof(ti->flags) == sizeof(long)) || sizeof(ti->flags) == sizeof(long long))) __compiletime_assert_0(); } while (0); (*(const volatile typeof( _Generic((ti->flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (ti->flags))) *)&(ti->flags)); });
}
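/*
 * Note: the statement expression above is the expanded
 * READ_ONCE(ti->flags): a compile-time size check followed by a load
 * through a const volatile pointer so the compiler can neither tear nor
 * cache the read.
 */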
# 183 "./include/linux/thread_info.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int arch_within_stack_frames(const void * const stack,
const void * const stackend,
const void *obj, unsigned long len)
{
return 0;
}
# 202 "./include/linux/thread_info.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void check_object_size(const void *ptr, unsigned long n,
bool to_user)
{ }


extern void
__bad_copy_from(void);
extern void
__bad_copy_to(void);

void __copy_overflow(int size, unsigned long count);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void copy_overflow(int size, unsigned long count)
{
if (1)
__copy_overflow(size, count);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) bool
check_copy_size(const void *addr, size_t bytes, bool is_source)
{
int sz = __builtin_object_size(addr, 0);
if (__builtin_expect(!!(sz >= 0 && sz < bytes), 0)) {
if (!__builtin_constant_p(bytes))
copy_overflow(sz, bytes);
else if (is_source)
__bad_copy_from();
else
__bad_copy_to();
return false;
}
if (({ int __ret_warn_on = !!(bytes > ((int)(~0U >> 1))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/thread_info.h"), "i" (233), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }))
return false;
check_object_size(addr, bytes, is_source);
return true;
}
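/*
 * Note: the inline asm in check_copy_size() is an expanded WARN_ON():
 * an ebreak trap plus a __bug_table entry recording file, line (the
 * baked-in 233) and flags, which the riscv trap handler later decodes
 * through report_bug().
 */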


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_setup_new_exec(void) { }
# 10 "./include/linux/uio.h" 2
# 1 "./include/linux/mm_types.h" 1




# 1 "./include/linux/mm_types_task.h" 1
# 13 "./include/linux/mm_types_task.h"
# 1 "./include/linux/atomic.h" 1






# 1 "./arch/riscv/include/asm/atomic.h" 1
# 19 "./arch/riscv/include/asm/atomic.h"
# 1 "./arch/riscv/include/asm/cmpxchg.h" 1
# 12 "./arch/riscv/include/asm/cmpxchg.h"
# 1 "./arch/riscv/include/asm/fence.h" 1
# 13 "./arch/riscv/include/asm/cmpxchg.h" 2
# 20 "./arch/riscv/include/asm/atomic.h" 2








static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_read(const atomic_t *v)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_1(void) ; if (!((sizeof(v->counter) == sizeof(char) || sizeof(v->counter) == sizeof(short) || sizeof(v->counter) == sizeof(int) || sizeof(v->counter) == sizeof(long)) || sizeof(v->counter) == sizeof(long long))) __compiletime_assert_1(); } while (0); (*(const volatile typeof( _Generic((v->counter), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (v->counter))) *)&(v->counter)); });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void arch_atomic_set(atomic_t *v, int i)
{
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_2(void) ; if (!((sizeof(v->counter) == sizeof(char) || sizeof(v->counter) == sizeof(short) || sizeof(v->counter) == sizeof(int) || sizeof(v->counter) == sizeof(long)) || sizeof(v->counter) == sizeof(long long))) __compiletime_assert_2(); } while (0); do { *(volatile typeof(v->counter) *)&(v->counter) = (i); } while (0); } while (0);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_read(const atomic64_t *v)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_3(void) ; if (!((sizeof(v->counter) == sizeof(char) || sizeof(v->counter) == sizeof(short) || sizeof(v->counter) == sizeof(int) || sizeof(v->counter) == sizeof(long)) || sizeof(v->counter) == sizeof(long long))) __compiletime_assert_3(); } while (0); (*(const volatile typeof( _Generic((v->counter), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (v->counter))) *)&(v->counter)); });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void arch_atomic64_set(atomic64_t *v, s64 i)
{
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_4(void) ; if (!((sizeof(v->counter) == sizeof(char) || sizeof(v->counter) == sizeof(short) || sizeof(v->counter) == sizeof(int) || sizeof(v->counter) == sizeof(long)) || sizeof(v->counter) == sizeof(long long))) __compiletime_assert_4(); } while (0); do { *(volatile typeof(v->counter) *)&(v->counter) = (i); } while (0); } while (0);
}
# 74 "./arch/riscv/include/asm/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void arch_atomic_add(int i, atomic_t *v) { __asm__ __volatile__ ( " amo" "add" "." "w" " zero, %1, %0" : "+A" (v->counter) : "r" (i) : "memory"); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void arch_atomic64_add(s64 i, atomic64_t *v) { __asm__ __volatile__ ( " amo" "add" "." "d" " zero, %1, %0" : "+A" (v->counter) : "r" (i) : "memory"); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void arch_atomic_sub(int i, atomic_t *v) { __asm__ __volatile__ ( " amo" "add" "." "w" " zero, %1, %0" : "+A" (v->counter) : "r" (-i) : "memory"); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void arch_atomic64_sub(s64 i, atomic64_t *v) { __asm__ __volatile__ ( " amo" "add" "." "d" " zero, %1, %0" : "+A" (v->counter) : "r" (-i) : "memory"); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void arch_atomic_and(int i, atomic_t *v) { __asm__ __volatile__ ( " amo" "and" "." "w" " zero, %1, %0" : "+A" (v->counter) : "r" (i) : "memory"); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void arch_atomic64_and(s64 i, atomic64_t *v) { __asm__ __volatile__ ( " amo" "and" "." "d" " zero, %1, %0" : "+A" (v->counter) : "r" (i) : "memory"); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void arch_atomic_or(int i, atomic_t *v) { __asm__ __volatile__ ( " amo" "or" "." "w" " zero, %1, %0" : "+A" (v->counter) : "r" (i) : "memory"); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void arch_atomic64_or(s64 i, atomic64_t *v) { __asm__ __volatile__ ( " amo" "or" "." "d" " zero, %1, %0" : "+A" (v->counter) : "r" (i) : "memory"); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void arch_atomic_xor(int i, atomic_t *v) { __asm__ __volatile__ ( " amo" "xor" "." "w" " zero, %1, %0" : "+A" (v->counter) : "r" (i) : "memory"); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void arch_atomic64_xor(s64 i, atomic64_t *v) { __asm__ __volatile__ ( " amo" "xor" "." "d" " zero, %1, %0" : "+A" (v->counter) : "r" (i) : "memory"); }
# 138 "./arch/riscv/include/asm/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_fetch_add_relaxed(int i, atomic_t *v) { register int ret; __asm__ __volatile__ ( " amo" "add" "." "w" " %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (i) : "memory"); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_fetch_add(int i, atomic_t *v) { register int ret; __asm__ __volatile__ ( " amo" "add" "." "w" ".aqrl %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (i) : "memory"); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_add_return_relaxed(int i, atomic_t *v) { return arch_atomic_fetch_add_relaxed(i, v) + i; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_add_return(int i, atomic_t *v) { return arch_atomic_fetch_add(i, v) + i; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) { register s64 ret; __asm__ __volatile__ ( " amo" "add" "." "d" " %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (i) : "memory"); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v) { register s64 ret; __asm__ __volatile__ ( " amo" "add" "." "d" ".aqrl %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (i) : "memory"); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_add_return_relaxed(s64 i, atomic64_t *v) { return arch_atomic64_fetch_add_relaxed(i, v) + i; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_add_return(s64 i, atomic64_t *v) { return arch_atomic64_fetch_add(i, v) + i; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_fetch_sub_relaxed(int i, atomic_t *v) { register int ret; __asm__ __volatile__ ( " amo" "add" "." "w" " %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (-i) : "memory"); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_fetch_sub(int i, atomic_t *v) { register int ret; __asm__ __volatile__ ( " amo" "add" "." "w" ".aqrl %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (-i) : "memory"); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_sub_return_relaxed(int i, atomic_t *v) { return arch_atomic_fetch_sub_relaxed(i, v) + -i; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_sub_return(int i, atomic_t *v) { return arch_atomic_fetch_sub(i, v) + -i; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) { register s64 ret; __asm__ __volatile__ ( " amo" "add" "." "d" " %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (-i) : "memory"); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v) { register s64 ret; __asm__ __volatile__ ( " amo" "add" "." "d" ".aqrl %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (-i) : "memory"); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_sub_return_relaxed(s64 i, atomic64_t *v) { return arch_atomic64_fetch_sub_relaxed(i, v) + -i; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_sub_return(s64 i, atomic64_t *v) { return arch_atomic64_fetch_sub(i, v) + -i; }
# 174 "./arch/riscv/include/asm/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_fetch_and_relaxed(int i, atomic_t *v) { register int ret; __asm__ __volatile__ ( " amo" "and" "." "w" " %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (i) : "memory"); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_fetch_and(int i, atomic_t *v) { register int ret; __asm__ __volatile__ ( " amo" "and" "." "w" ".aqrl %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (i) : "memory"); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) { register s64 ret; __asm__ __volatile__ ( " amo" "and" "." "d" " %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (i) : "memory"); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v) { register s64 ret; __asm__ __volatile__ ( " amo" "and" "." "d" ".aqrl %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (i) : "memory"); return ret; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_fetch_or_relaxed(int i, atomic_t *v) { register int ret; __asm__ __volatile__ ( " amo" "or" "." "w" " %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (i) : "memory"); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_fetch_or(int i, atomic_t *v) { register int ret; __asm__ __volatile__ ( " amo" "or" "." "w" ".aqrl %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (i) : "memory"); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) { register s64 ret; __asm__ __volatile__ ( " amo" "or" "." "d" " %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (i) : "memory"); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v) { register s64 ret; __asm__ __volatile__ ( " amo" "or" "." "d" ".aqrl %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (i) : "memory"); return ret; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_fetch_xor_relaxed(int i, atomic_t *v) { register int ret; __asm__ __volatile__ ( " amo" "xor" "." "w" " %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (i) : "memory"); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_fetch_xor(int i, atomic_t *v) { register int ret; __asm__ __volatile__ ( " amo" "xor" "." "w" ".aqrl %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (i) : "memory"); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) { register s64 ret; __asm__ __volatile__ ( " amo" "xor" "." "d" " %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (i) : "memory"); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v) { register s64 ret; __asm__ __volatile__ ( " amo" "xor" "." "d" ".aqrl %1, %2, %0" : "+A" (v->counter), "=r" (ret) : "r" (i) : "memory"); return ret; }
# 200 "./arch/riscv/include/asm/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int prev, rc;

__asm__ __volatile__ (
"0: lr.w %[p], %[c]\n"
" beq %[p], %[u], 1f\n"
" add %[rc], %[p], %[a]\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [a]"r" (a), [u]"r" (u)
: "memory");
return prev;
}
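/*
 * Note: arch_atomic_fetch_add_unless() is a classic LR/SC retry loop:
 * load-reserve v->counter, bail out if it equals u, otherwise try to
 * store-conditional counter + a and retry on failure; the trailing
 * fence rw, rw fully orders the successful path. The old value is
 * returned either way.
 */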



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 prev;
long rc;

__asm__ __volatile__ (
"0: lr.d %[p], %[c]\n"
" beq %[p], %[u], 1f\n"
" add %[rc], %[p], %[a]\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [a]"r" (a), [u]"r" (u)
: "memory");
return prev;
}
# 299 "./arch/riscv/include/asm/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_xchg_relaxed(atomic_t *v, int n) { return ({ __typeof__(&(v->counter)) __ptr = (&(v->counter)); __typeof__(n) __new = (n); __typeof__(*(&(v->counter))) __ret; switch (4) { case 4: __asm__ __volatile__ ( " amoswap.w %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( " amoswap.d %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_5(void) ; if (!(!(1))) __compiletime_assert_5(); } while (0); } __ret; }); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_xchg_acquire(atomic_t *v, int n) { return ({ __typeof__(&(v->counter)) __ptr = (&(v->counter)); __typeof__(n) __new = (n); __typeof__(*(&(v->counter))) __ret; switch (4) { case 4: __asm__ __volatile__ ( " amoswap.w %0, %2, %1\n" "\tfence r , rw\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( " amoswap.d %0, %2, %1\n" "\tfence r , rw\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_6(void) ; if (!(!(1))) __compiletime_assert_6(); } while (0); } __ret; }); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_xchg_release(atomic_t *v, int n) { return ({ __typeof__(&(v->counter)) __ptr = (&(v->counter)); __typeof__(n) __new = (n); __typeof__(*(&(v->counter))) __ret; switch (4) { case 4: __asm__ __volatile__ ( "\tfence rw, w\n" " amoswap.w %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( "\tfence rw, w\n" " amoswap.d %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_7(void) ; if (!(!(1))) __compiletime_assert_7(); } while (0); } __ret; }); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_xchg(atomic_t *v, int n) { return ({ __typeof__(&(v->counter)) __ptr = (&(v->counter)); __typeof__(n) __new = (n); __typeof__(*(&(v->counter))) __ret; switch (4) { case 4: __asm__ __volatile__ ( " amoswap.w.aqrl %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( " amoswap.d.aqrl %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_8(void) ; if (!(!(1))) __compiletime_assert_8(); } while (0); } __ret; }); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_cmpxchg_relaxed(atomic_t *v, int o, int n) { return ({ __typeof__(&(v->counter)) __ptr = (&(v->counter)); __typeof__(*(&(v->counter))) __old = (o); __typeof__(*(&(v->counter))) __new = (n); __typeof__(*(&(v->counter))) __ret; register unsigned int __rc; switch (4) { case 4: __asm__ __volatile__ ( "0: lr.w %0, %2\n" " bne %0, %z3, 1f\n" " sc.w %1, %z4, %2\n" " bnez %1, 0b\n" "1:\n" : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) : "rJ" ((long)__old), "rJ" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( "0: lr.d %0, %2\n" " bne %0, %z3, 1f\n" " sc.d %1, %z4, %2\n" " bnez %1, 0b\n" "1:\n" : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) : "rJ" (__old), "rJ" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_9(void) ; if (!(!(1))) __compiletime_assert_9(); } while (0); } __ret; }); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_cmpxchg_acquire(atomic_t *v, int o, int n) { return ({ __typeof__(&(v->counter)) __ptr = (&(v->counter)); __typeof__(*(&(v->counter))) __old = (o); __typeof__(*(&(v->counter))) __new = (n); __typeof__(*(&(v->counter))) __ret; register unsigned int __rc; switch (4) { case 4: __asm__ __volatile__ ( "0: lr.w %0, %2\n" " bne %0, %z3, 1f\n" " sc.w %1, %z4, %2\n" " bnez %1, 0b\n" "\tfence r , rw\n" "1:\n" : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) : "rJ" ((long)__old), "rJ" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( "0: lr.d %0, %2\n" " bne %0, %z3, 1f\n" " sc.d %1, %z4, %2\n" " bnez %1, 0b\n" "\tfence r , rw\n" "1:\n" : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) : "rJ" (__old), "rJ" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_10(void) ; if (!(!(1))) __compiletime_assert_10(); } while (0); } __ret; }); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_cmpxchg_release(atomic_t *v, int o, int n) { return ({ __typeof__(&(v->counter)) __ptr = (&(v->counter)); __typeof__(*(&(v->counter))) __old = (o); __typeof__(*(&(v->counter))) __new = (n); __typeof__(*(&(v->counter))) __ret; register unsigned int __rc; switch (4) { case 4: __asm__ __volatile__ ( "\tfence rw, w\n" "0: lr.w %0, %2\n" " bne %0, %z3, 1f\n" " sc.w %1, %z4, %2\n" " bnez %1, 0b\n" "1:\n" : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) : "rJ" ((long)__old), "rJ" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( "\tfence rw, w\n" "0: lr.d %0, %2\n" " bne %0, %z3, 1f\n" " sc.d %1, %z4, %2\n" " bnez %1, 0b\n" "1:\n" : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) : "rJ" (__old), "rJ" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_11(void) ; if (!(!(1))) __compiletime_assert_11(); } while (0); } __ret; }); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_cmpxchg(atomic_t *v, int o, int n) { return ({ __typeof__(&(v->counter)) __ptr = (&(v->counter)); __typeof__(*(&(v->counter))) __old = (o); __typeof__(*(&(v->counter))) __new = (n); __typeof__(*(&(v->counter))) __ret; register unsigned int __rc; switch (4) { case 4: __asm__ __volatile__ ( "0: lr.w %0, %2\n" " bne %0, %z3, 1f\n" " sc.w.rl %1, %z4, %2\n" " bnez %1, 0b\n" " fence rw, rw\n" "1:\n" : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) : "rJ" ((long)__old), "rJ" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( "0: lr.d %0, %2\n" " bne %0, %z3, 1f\n" " sc.d.rl %1, %z4, %2\n" " bnez %1, 0b\n" " fence rw, rw\n" "1:\n" : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) : "rJ" (__old), "rJ" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_12(void) ; if (!(!(1))) __compiletime_assert_12(); } while (0); } __ret; }); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_xchg_relaxed(atomic64_t *v, s64 n) { return ({ __typeof__(&(v->counter)) __ptr = (&(v->counter)); __typeof__(n) __new = (n); __typeof__(*(&(v->counter))) __ret; switch (8) { case 4: __asm__ __volatile__ ( " amoswap.w %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( " amoswap.d %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_13(void) ; if (!(!(1))) __compiletime_assert_13(); } while (0); } __ret; }); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_xchg_acquire(atomic64_t *v, s64 n) { return ({ __typeof__(&(v->counter)) __ptr = (&(v->counter)); __typeof__(n) __new = (n); __typeof__(*(&(v->counter))) __ret; switch (8) { case 4: __asm__ __volatile__ ( " amoswap.w %0, %2, %1\n" "\tfence r , rw\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( " amoswap.d %0, %2, %1\n" "\tfence r , rw\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_14(void) ; if (!(!(1))) __compiletime_assert_14(); } while (0); } __ret; }); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_xchg_release(atomic64_t *v, s64 n) { return ({ __typeof__(&(v->counter)) __ptr = (&(v->counter)); __typeof__(n) __new = (n); __typeof__(*(&(v->counter))) __ret; switch (8) { case 4: __asm__ __volatile__ ( "\tfence rw, w\n" " amoswap.w %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( "\tfence rw, w\n" " amoswap.d %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_15(void) ; if (!(!(1))) __compiletime_assert_15(); } while (0); } __ret; }); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_xchg(atomic64_t *v, s64 n) { return ({ __typeof__(&(v->counter)) __ptr = (&(v->counter)); __typeof__(n) __new = (n); __typeof__(*(&(v->counter))) __ret; switch (8) { case 4: __asm__ __volatile__ ( " amoswap.w.aqrl %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( " amoswap.d.aqrl %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_16(void) ; if (!(!(1))) __compiletime_assert_16(); } while (0); } __ret; }); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 o, s64 n) { return ({ __typeof__(&(v->counter)) __ptr = (&(v->counter)); __typeof__(*(&(v->counter))) __old = (o); __typeof__(*(&(v->counter))) __new = (n); __typeof__(*(&(v->counter))) __ret; register unsigned int __rc; switch (8) { case 4: __asm__ __volatile__ ( "0: lr.w %0, %2\n" " bne %0, %z3, 1f\n" " sc.w %1, %z4, %2\n" " bnez %1, 0b\n" "1:\n" : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) : "rJ" ((long)__old), "rJ" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( "0: lr.d %0, %2\n" " bne %0, %z3, 1f\n" " sc.d %1, %z4, %2\n" " bnez %1, 0b\n" "1:\n" : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) : "rJ" (__old), "rJ" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_17(void) ; if (!(!(1))) __compiletime_assert_17(); } while (0); } __ret; }); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 o, s64 n) { return ({ __typeof__(&(v->counter)) __ptr = (&(v->counter)); __typeof__(*(&(v->counter))) __old = (o); __typeof__(*(&(v->counter))) __new = (n); __typeof__(*(&(v->counter))) __ret; register unsigned int __rc; switch (8) { case 4: __asm__ __volatile__ ( "0: lr.w %0, %2\n" " bne %0, %z3, 1f\n" " sc.w %1, %z4, %2\n" " bnez %1, 0b\n" "\tfence r , rw\n" "1:\n" : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) : "rJ" ((long)__old), "rJ" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( "0: lr.d %0, %2\n" " bne %0, %z3, 1f\n" " sc.d %1, %z4, %2\n" " bnez %1, 0b\n" "\tfence r , rw\n" "1:\n" : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) : "rJ" (__old), "rJ" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_18(void) ; if (!(!(1))) __compiletime_assert_18(); } while (0); } __ret; }); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_cmpxchg_release(atomic64_t *v, s64 o, s64 n) { return ({ __typeof__(&(v->counter)) __ptr = (&(v->counter)); __typeof__(*(&(v->counter))) __old = (o); __typeof__(*(&(v->counter))) __new = (n); __typeof__(*(&(v->counter))) __ret; register unsigned int __rc; switch (8) { case 4: __asm__ __volatile__ ( "\tfence rw, w\n" "0: lr.w %0, %2\n" " bne %0, %z3, 1f\n" " sc.w %1, %z4, %2\n" " bnez %1, 0b\n" "1:\n" : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) : "rJ" ((long)__old), "rJ" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( "\tfence rw, w\n" "0: lr.d %0, %2\n" " bne %0, %z3, 1f\n" " sc.d %1, %z4, %2\n" " bnez %1, 0b\n" "1:\n" : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) : "rJ" (__old), "rJ" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_19(void) ; if (!(!(1))) __compiletime_assert_19(); } while (0); } __ret; }); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n) { return ({ __typeof__(&(v->counter)) __ptr = (&(v->counter)); __typeof__(*(&(v->counter))) __old = (o); __typeof__(*(&(v->counter))) __new = (n); __typeof__(*(&(v->counter))) __ret; register unsigned int __rc; switch (8) { case 4: __asm__ __volatile__ ( "0: lr.w %0, %2\n" " bne %0, %z3, 1f\n" " sc.w.rl %1, %z4, %2\n" " bnez %1, 0b\n" " fence rw, rw\n" "1:\n" : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) : "rJ" ((long)__old), "rJ" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( "0: lr.d %0, %2\n" " bne %0, %z3, 1f\n" " sc.d.rl %1, %z4, %2\n" " bnez %1, 0b\n" " fence rw, rw\n" "1:\n" : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) : "rJ" (__old), "rJ" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_20(void) ; if (!(!(1))) __compiletime_assert_20(); } while (0); } __ret; }); }
# 313 "./arch/riscv/include/asm/atomic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int arch_atomic_sub_if_positive(atomic_t *v, int offset)
{
int prev, rc;

__asm__ __volatile__ (
"0: lr.w %[p], %[c]\n"
" sub %[rc], %[p], %[o]\n"
" bltz %[rc], 1f\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [o]"r" (offset)
: "memory");
return prev - offset;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64 arch_atomic64_sub_if_positive(atomic64_t *v, s64 offset)
{
s64 prev;
long rc;

__asm__ __volatile__ (
"0: lr.d %[p], %[c]\n"
" sub %[rc], %[p], %[o]\n"
" bltz %[rc], 1f\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [o]"r" (offset)
: "memory");
return prev - offset;
}
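/*
 * Note: the *_sub_if_positive() pair follows the same LR/SC pattern but
 * only commits the store when the decremented value stays non-negative
 * (bltz skips the sc), returning prev - offset so callers can tell
 * whether the update happened.
 */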
# 8 "./include/linux/atomic.h" 2
# 80 "./include/linux/atomic.h"
# 1 "./include/linux/atomic/atomic-arch-fallback.h" 1
# 151 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_read_acquire(const atomic_t *v)
{
int ret;

if ((sizeof(atomic_t) == sizeof(char) || sizeof(atomic_t) == sizeof(short) || sizeof(atomic_t) == sizeof(int) || sizeof(atomic_t) == sizeof(long))) {
ret = ({ typeof(*&(v)->counter) ___p1 = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_21(void) ; if (!((sizeof(*&(v)->counter) == sizeof(char) || sizeof(*&(v)->counter) == sizeof(short) || sizeof(*&(v)->counter) == sizeof(int) || sizeof(*&(v)->counter) == sizeof(long)) || sizeof(*&(v)->counter) == sizeof(long long))) __compiletime_assert_21(); } while (0); (*(const volatile typeof( _Generic((*&(v)->counter), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*&(v)->counter))) *)&(*&(v)->counter)); }); do { __attribute__((__noreturn__)) extern void __compiletime_assert_22(void) ; if (!((sizeof(*&(v)->counter) == sizeof(char) || sizeof(*&(v)->counter) == sizeof(short) || sizeof(*&(v)->counter) == sizeof(int) || sizeof(*&(v)->counter) == sizeof(long)))) __compiletime_assert_22(); } while (0); __asm__ __volatile__ ("fence " "r" "," "rw" : : : "memory"); ___p1; });
} else {
ret = arch_atomic_read(v);
__asm__ __volatile__("\tfence r , rw\n" "" ::: "memory");
}

return ret;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch_atomic_set_release(atomic_t *v, int i)
{
if ((sizeof(atomic_t) == sizeof(char) || sizeof(atomic_t) == sizeof(short) || sizeof(atomic_t) == sizeof(int) || sizeof(atomic_t) == sizeof(long))) {
do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_23(void) ; if (!((sizeof(*&(v)->counter) == sizeof(char) || sizeof(*&(v)->counter) == sizeof(short) || sizeof(*&(v)->counter) == sizeof(int) || sizeof(*&(v)->counter) == sizeof(long)))) __compiletime_assert_23(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_24(void) ; if (!((sizeof(*&(v)->counter) == sizeof(char) || sizeof(*&(v)->counter) == sizeof(short) || sizeof(*&(v)->counter) == sizeof(int) || sizeof(*&(v)->counter) == sizeof(long)) || sizeof(*&(v)->counter) == sizeof(long long))) __compiletime_assert_24(); } while (0); do { *(volatile typeof(*&(v)->counter) *)&(*&(v)->counter) = (i); } while (0); } while (0); } while (0); } while (0);
} else {
__asm__ __volatile__("\tfence rw, w\n" "" ::: "memory");;
arch_atomic_set(v, i);
}
}
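/*
 * Editor's note: likewise, the nested do/while chain above is
 * smp_store_release(&v->counter, i): a release fence followed by a volatile
 * store (WRITE_ONCE()). The empty "do { } while (0)" at the head is an
 * instrumentation hook that is compiled out in this configuration.
 */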
# 189 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_add_return_acquire(int i, atomic_t *v)
{
int ret = arch_atomic_add_return_relaxed(i, v);
__asm__ __volatile__("\tfence r , rw\n" "" ::: "memory");
return ret;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_add_return_release(int i, atomic_t *v)
{
__asm__ __volatile__("\tfence rw, w\n" "" ::: "memory");;
return arch_atomic_add_return_relaxed(i, v);
}
# 231 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_add_acquire(int i, atomic_t *v)
{
int ret = arch_atomic_fetch_add_relaxed(i, v);
__asm__ __volatile__("\tfence r , rw\n" "" ::: "memory");
return ret;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_add_release(int i, atomic_t *v)
{
__asm__ __volatile__("\tfence rw, w\n" "" ::: "memory");;
return arch_atomic_fetch_add_relaxed(i, v);
}
# 273 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_sub_return_acquire(int i, atomic_t *v)
{
int ret = arch_atomic_sub_return_relaxed(i, v);
__asm__ __volatile__("\tfence r , rw\n" "" ::: "memory");
return ret;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_sub_return_release(int i, atomic_t *v)
{
__asm__ __volatile__("\tfence rw, w\n" "" ::: "memory");;
return arch_atomic_sub_return_relaxed(i, v);
}
# 315 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_sub_acquire(int i, atomic_t *v)
{
int ret = arch_atomic_fetch_sub_relaxed(i, v);
__asm__ __volatile__("\tfence r , rw\n" "" ::: "memory");
return ret;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_sub_release(int i, atomic_t *v)
{
__asm__ __volatile__("\tfence rw, w\n" "" ::: "memory");;
return arch_atomic_fetch_sub_relaxed(i, v);
}
# 351 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch_atomic_inc(atomic_t *v)
{
arch_atomic_add(1, v);
}
# 367 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_inc_return(atomic_t *v)
{
return arch_atomic_add_return(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_inc_return_acquire(atomic_t *v)
{
return arch_atomic_add_return_acquire(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_inc_return_release(atomic_t *v)
{
return arch_atomic_add_return_release(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_inc_return_relaxed(atomic_t *v)
{
return arch_atomic_add_return_relaxed(1, v);
}
# 448 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_inc(atomic_t *v)
{
return arch_atomic_fetch_add(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_inc_acquire(atomic_t *v)
{
return arch_atomic_fetch_add_acquire(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_inc_release(atomic_t *v)
{
return arch_atomic_fetch_add_release(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_inc_relaxed(atomic_t *v)
{
return arch_atomic_fetch_add_relaxed(1, v);
}
# 522 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch_atomic_dec(atomic_t *v)
{
arch_atomic_sub(1, v);
}
# 538 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_dec_return(atomic_t *v)
{
return arch_atomic_sub_return(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_dec_return_acquire(atomic_t *v)
{
return arch_atomic_sub_return_acquire(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_dec_return_release(atomic_t *v)
{
return arch_atomic_sub_return_release(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_dec_return_relaxed(atomic_t *v)
{
return arch_atomic_sub_return_relaxed(1, v);
}
# 619 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_dec(atomic_t *v)
{
return arch_atomic_fetch_sub(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_dec_acquire(atomic_t *v)
{
return arch_atomic_fetch_sub_acquire(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_dec_release(atomic_t *v)
{
return arch_atomic_fetch_sub_release(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_dec_relaxed(atomic_t *v)
{
return arch_atomic_fetch_sub_relaxed(1, v);
}
# 699 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_and_acquire(int i, atomic_t *v)
{
int ret = arch_atomic_fetch_and_relaxed(i, v);
__asm__ __volatile__("\tfence r , rw\n" "" ::: "memory");
return ret;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_and_release(int i, atomic_t *v)
{
__asm__ __volatile__("\tfence rw, w\n" "" ::: "memory");;
return arch_atomic_fetch_and_relaxed(i, v);
}
# 735 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch_atomic_andnot(int i, atomic_t *v)
{
arch_atomic_and(~i, v);
}
# 751 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_andnot(int i, atomic_t *v)
{
return arch_atomic_fetch_and(~i, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
return arch_atomic_fetch_and_acquire(~i, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_andnot_release(int i, atomic_t *v)
{
return arch_atomic_fetch_and_release(~i, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
return arch_atomic_fetch_and_relaxed(~i, v);
}
# 831 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_or_acquire(int i, atomic_t *v)
{
int ret = arch_atomic_fetch_or_relaxed(i, v);
__asm__ __volatile__("\tfence r , rw\n" "" ::: "memory");
return ret;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_or_release(int i, atomic_t *v)
{
__asm__ __volatile__("\tfence rw, w\n" "" ::: "memory");;
return arch_atomic_fetch_or_relaxed(i, v);
}
# 873 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_xor_acquire(int i, atomic_t *v)
{
int ret = arch_atomic_fetch_xor_relaxed(i, v);
__asm__ __volatile__("\tfence r , rw\n" "" ::: "memory");
return ret;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
arch_atomic_fetch_xor_release(int i, atomic_t *v)
{
__asm__ __volatile__("\tfence rw, w\n" "" ::: "memory");;
return arch_atomic_fetch_xor_relaxed(i, v);
}
# 1000 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
int r, o = *old;
r = arch_atomic_cmpxchg(v, o, new);
if (__builtin_expect(!!(r != o), 0))
*old = r;
return __builtin_expect(!!(r == o), 1);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
int r, o = *old;
r = arch_atomic_cmpxchg_acquire(v, o, new);
if (__builtin_expect(!!(r != o), 0))
*old = r;
return __builtin_expect(!!(r == o), 1);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
int r, o = *old;
r = arch_atomic_cmpxchg_release(v, o, new);
if (__builtin_expect(!!(r != o), 0))
*old = r;
return __builtin_expect(!!(r == o), 1);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
int r, o = *old;
r = arch_atomic_cmpxchg_relaxed(v, o, new);
if (__builtin_expect(!!(r != o), 0))
*old = r;
return __builtin_expect(!!(r == o), 1);
}
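/*
 * Editor's note: __builtin_expect(!!(x), 0) and __builtin_expect(!!(x), 1)
 * above are the expansions of unlikely() and likely(). try_cmpxchg() returns
 * true when the swap happened and, on failure, writes the value actually
 * observed back into *old; that is why the retry loops below
 * (arch_atomic_inc_unless_negative() etc.) never re-read v themselves.
 */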
# 1099 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_sub_and_test(int i, atomic_t *v)
{
return arch_atomic_sub_return(i, v) == 0;
}
# 1116 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_dec_and_test(atomic_t *v)
{
return arch_atomic_dec_return(v) == 0;
}
# 1133 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_inc_and_test(atomic_t *v)
{
return arch_atomic_inc_return(v) == 0;
}
# 1151 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_add_negative(int i, atomic_t *v)
{
return arch_atomic_add_return(i, v) < 0;
}
# 1194 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_add_unless(atomic_t *v, int a, int u)
{
return arch_atomic_fetch_add_unless(v, a, u) != u;
}
# 1210 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_inc_not_zero(atomic_t *v)
{
return arch_atomic_add_unless(v, 1, 0);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_inc_unless_negative(atomic_t *v)
{
int c = arch_atomic_read(v);

do {
if (__builtin_expect(!!(c < 0), 0))
return false;
} while (!arch_atomic_try_cmpxchg(v, &c, c + 1));

return true;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_dec_unless_positive(atomic_t *v)
{
int c = arch_atomic_read(v);

do {
if (__builtin_expect(!!(c > 0), 0))
return false;
} while (!arch_atomic_try_cmpxchg(v, &c, c - 1));

return true;
}
# 1272 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_read_acquire(const atomic64_t *v)
{
s64 ret;

if ((sizeof(atomic64_t) == sizeof(char) || sizeof(atomic64_t) == sizeof(short) || sizeof(atomic64_t) == sizeof(int) || sizeof(atomic64_t) == sizeof(long))) {
ret = ({ typeof(*&(v)->counter) ___p1 = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_25(void) ; if (!((sizeof(*&(v)->counter) == sizeof(char) || sizeof(*&(v)->counter) == sizeof(short) || sizeof(*&(v)->counter) == sizeof(int) || sizeof(*&(v)->counter) == sizeof(long)) || sizeof(*&(v)->counter) == sizeof(long long))) __compiletime_assert_25(); } while (0); (*(const volatile typeof( _Generic((*&(v)->counter), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*&(v)->counter))) *)&(*&(v)->counter)); }); do { __attribute__((__noreturn__)) extern void __compiletime_assert_26(void) ; if (!((sizeof(*&(v)->counter) == sizeof(char) || sizeof(*&(v)->counter) == sizeof(short) || sizeof(*&(v)->counter) == sizeof(int) || sizeof(*&(v)->counter) == sizeof(long)))) __compiletime_assert_26(); } while (0); __asm__ __volatile__ ("fence " "r" "," "rw" : : : "memory"); ___p1; });
} else {
ret = arch_atomic64_read(v);
__asm__ __volatile__("\tfence r , rw\n" "" ::: "memory");
}

return ret;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch_atomic64_set_release(atomic64_t *v, s64 i)
{
if ((sizeof(atomic64_t) == sizeof(char) || sizeof(atomic64_t) == sizeof(short) || sizeof(atomic64_t) == sizeof(int) || sizeof(atomic64_t) == sizeof(long))) {
do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_27(void) ; if (!((sizeof(*&(v)->counter) == sizeof(char) || sizeof(*&(v)->counter) == sizeof(short) || sizeof(*&(v)->counter) == sizeof(int) || sizeof(*&(v)->counter) == sizeof(long)))) __compiletime_assert_27(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_28(void) ; if (!((sizeof(*&(v)->counter) == sizeof(char) || sizeof(*&(v)->counter) == sizeof(short) || sizeof(*&(v)->counter) == sizeof(int) || sizeof(*&(v)->counter) == sizeof(long)) || sizeof(*&(v)->counter) == sizeof(long long))) __compiletime_assert_28(); } while (0); do { *(volatile typeof(*&(v)->counter) *)&(*&(v)->counter) = (i); } while (0); } while (0); } while (0); } while (0);
} else {
__asm__ __volatile__("\tfence rw, w\n" "" ::: "memory");;
arch_atomic64_set(v, i);
}
}
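/*
 * Editor's note: the atomic64_t block that follows mirrors the atomic_t
 * block above one-for-one; only the operand type changes (int vs. s64).
 */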
# 1310 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
s64 ret = arch_atomic64_add_return_relaxed(i, v);
__asm__ __volatile__("\tfence r , rw\n" "" ::: "memory");
return ret;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_add_return_release(s64 i, atomic64_t *v)
{
__asm__ __volatile__("\tfence rw, w\n" "" ::: "memory");;
return arch_atomic64_add_return_relaxed(i, v);
}
# 1352 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
__asm__ __volatile__("\tfence r , rw\n" "" ::: "memory");
return ret;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
__asm__ __volatile__("\tfence rw, w\n" "" ::: "memory");;
return arch_atomic64_fetch_add_relaxed(i, v);
}
# 1394 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
s64 ret = arch_atomic64_sub_return_relaxed(i, v);
__asm__ __volatile__("\tfence r , rw\n" "" ::: "memory");
return ret;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_sub_return_release(s64 i, atomic64_t *v)
{
__asm__ __volatile__("\tfence rw, w\n" "" ::: "memory");;
return arch_atomic64_sub_return_relaxed(i, v);
}
# 1436 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
__asm__ __volatile__("\tfence r , rw\n" "" ::: "memory");
return ret;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
__asm__ __volatile__("\tfence rw, w\n" "" ::: "memory");;
return arch_atomic64_fetch_sub_relaxed(i, v);
}
# 1472 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch_atomic64_inc(atomic64_t *v)
{
arch_atomic64_add(1, v);
}
# 1488 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_inc_return(atomic64_t *v)
{
return arch_atomic64_add_return(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_inc_return_acquire(atomic64_t *v)
{
return arch_atomic64_add_return_acquire(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_inc_return_release(atomic64_t *v)
{
return arch_atomic64_add_return_release(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_inc_return_relaxed(atomic64_t *v)
{
return arch_atomic64_add_return_relaxed(1, v);
}
# 1569 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_inc(atomic64_t *v)
{
return arch_atomic64_fetch_add(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_inc_acquire(atomic64_t *v)
{
return arch_atomic64_fetch_add_acquire(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_inc_release(atomic64_t *v)
{
return arch_atomic64_fetch_add_release(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_inc_relaxed(atomic64_t *v)
{
return arch_atomic64_fetch_add_relaxed(1, v);
}
# 1643 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch_atomic64_dec(atomic64_t *v)
{
arch_atomic64_sub(1, v);
}
# 1659 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_dec_return(atomic64_t *v)
{
return arch_atomic64_sub_return(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_dec_return_acquire(atomic64_t *v)
{
return arch_atomic64_sub_return_acquire(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_dec_return_release(atomic64_t *v)
{
return arch_atomic64_sub_return_release(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_dec_return_relaxed(atomic64_t *v)
{
return arch_atomic64_sub_return_relaxed(1, v);
}
# 1740 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_dec(atomic64_t *v)
{
return arch_atomic64_fetch_sub(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_dec_acquire(atomic64_t *v)
{
return arch_atomic64_fetch_sub_acquire(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_dec_release(atomic64_t *v)
{
return arch_atomic64_fetch_sub_release(1, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_dec_relaxed(atomic64_t *v)
{
return arch_atomic64_fetch_sub_relaxed(1, v);
}
# 1820 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
__asm__ __volatile__("\tfence r , rw\n" "" ::: "memory");
return ret;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
__asm__ __volatile__("\tfence rw, w\n" "" ::: "memory");;
return arch_atomic64_fetch_and_relaxed(i, v);
}
# 1856 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch_atomic64_andnot(s64 i, atomic64_t *v)
{
arch_atomic64_and(~i, v);
}
# 1872 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_and(~i, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_and_acquire(~i, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_and_release(~i, v);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_and_relaxed(~i, v);
}
# 1952 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
__asm__ __volatile__("\tfence r , rw\n" "" ::: "memory");
return ret;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
__asm__ __volatile__("\tfence rw, w\n" "" ::: "memory");;
return arch_atomic64_fetch_or_relaxed(i, v);
}
# 1994 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
__asm__ __volatile__("\tfence r , rw\n" "" ::: "memory");
return ret;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
__asm__ __volatile__("\tfence rw, w\n" "" ::: "memory");;
return arch_atomic64_fetch_xor_relaxed(i, v);
}
# 2121 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
r = arch_atomic64_cmpxchg(v, o, new);
if (__builtin_expect(!!(r != o), 0))
*old = r;
return __builtin_expect(!!(r == o), 1);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
r = arch_atomic64_cmpxchg_acquire(v, o, new);
if (__builtin_expect(!!(r != o), 0))
*old = r;
return __builtin_expect(!!(r == o), 1);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
r = arch_atomic64_cmpxchg_release(v, o, new);
if (__builtin_expect(!!(r != o), 0))
*old = r;
return __builtin_expect(!!(r == o), 1);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
r = arch_atomic64_cmpxchg_relaxed(v, o, new);
if (__builtin_expect(!!(r != o), 0))
*old = r;
return __builtin_expect(!!(r == o), 1);
}
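/*
 * Editor's note: as in the 32-bit family above, each ordering variant here
 * forwards to the arch_atomic64_cmpxchg_*() primitive with the matching
 * suffix; riscv presumably provides those as inline functions, which is why
 * the names survive preprocessing unexpanded.
 */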
# 2220 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
return arch_atomic64_sub_return(i, v) == 0;
}
# 2237 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic64_dec_and_test(atomic64_t *v)
{
return arch_atomic64_dec_return(v) == 0;
}
# 2254 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic64_inc_and_test(atomic64_t *v)
{
return arch_atomic64_inc_return(v) == 0;
}
# 2272 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic64_add_negative(s64 i, atomic64_t *v)
{
return arch_atomic64_add_return(i, v) < 0;
}
# 2315 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
return arch_atomic64_fetch_add_unless(v, a, u) != u;
}
# 2331 "./include/linux/atomic/atomic-arch-fallback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic64_inc_not_zero(atomic64_t *v)
{
return arch_atomic64_add_unless(v, 1, 0);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic64_inc_unless_negative(atomic64_t *v)
{
s64 c = arch_atomic64_read(v);

do {
if (__builtin_expect(!!(c < 0), 0))
return false;
} while (!arch_atomic64_try_cmpxchg(v, &c, c + 1));

return true;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic64_dec_unless_positive(atomic64_t *v)
{
s64 c = arch_atomic64_read(v);

do {
if (__builtin_expect(!!(c > 0), 0))
return false;
} while (!arch_atomic64_try_cmpxchg(v, &c, c - 1));

return true;
}
# 81 "./include/linux/atomic.h" 2
# 1 "./include/linux/atomic/atomic-long.h" 1
# 10 "./include/linux/atomic/atomic-long.h"
# 1 "./arch/riscv/include/generated/uapi/asm/types.h" 1
# 11 "./include/linux/atomic/atomic-long.h" 2


typedef atomic64_t atomic_long_t;
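/*
 * Editor's note: CONFIG_64BIT is set for rv64, so atomic_long_t is
 * atomic64_t and every arch_atomic_long_*() wrapper below is a trivial
 * forward to the matching arch_atomic64_*() op; a 32-bit build would map
 * the same header onto the atomic_t family instead.
 */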
# 26 "./include/linux/atomic/atomic-long.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_read(const atomic_long_t *v)
{
return arch_atomic64_read(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_read_acquire(const atomic_long_t *v)
{
return arch_atomic64_read_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch_atomic_long_set(atomic_long_t *v, long i)
{
arch_atomic64_set(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch_atomic_long_set_release(atomic_long_t *v, long i)
{
arch_atomic64_set_release(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch_atomic_long_add(long i, atomic_long_t *v)
{
arch_atomic64_add(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_add_return(long i, atomic_long_t *v)
{
return arch_atomic64_add_return(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_add_return_acquire(long i, atomic_long_t *v)
{
return arch_atomic64_add_return_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_add_return_release(long i, atomic_long_t *v)
{
return arch_atomic64_add_return_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v)
{
return arch_atomic64_add_return_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_add(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_add(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_add_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_add_release(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_add_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_add_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch_atomic_long_sub(long i, atomic_long_t *v)
{
arch_atomic64_sub(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_sub_return(long i, atomic_long_t *v)
{
return arch_atomic64_sub_return(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v)
{
return arch_atomic64_sub_return_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_sub_return_release(long i, atomic_long_t *v)
{
return arch_atomic64_sub_return_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
{
return arch_atomic64_sub_return_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_sub(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_sub(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_sub_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_sub_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_sub_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch_atomic_long_inc(atomic_long_t *v)
{
arch_atomic64_inc(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_inc_return(atomic_long_t *v)
{
return arch_atomic64_inc_return(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_inc_return_acquire(atomic_long_t *v)
{
return arch_atomic64_inc_return_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_inc_return_release(atomic_long_t *v)
{
return arch_atomic64_inc_return_release(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_inc_return_relaxed(atomic_long_t *v)
{
return arch_atomic64_inc_return_relaxed(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_inc(atomic_long_t *v)
{
return arch_atomic64_fetch_inc(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_inc_acquire(atomic_long_t *v)
{
return arch_atomic64_fetch_inc_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_inc_release(atomic_long_t *v)
{
return arch_atomic64_fetch_inc_release(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v)
{
return arch_atomic64_fetch_inc_relaxed(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch_atomic_long_dec(atomic_long_t *v)
{
arch_atomic64_dec(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_dec_return(atomic_long_t *v)
{
return arch_atomic64_dec_return(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_dec_return_acquire(atomic_long_t *v)
{
return arch_atomic64_dec_return_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_dec_return_release(atomic_long_t *v)
{
return arch_atomic64_dec_return_release(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_dec_return_relaxed(atomic_long_t *v)
{
return arch_atomic64_dec_return_relaxed(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_dec(atomic_long_t *v)
{
return arch_atomic64_fetch_dec(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_dec_acquire(atomic_long_t *v)
{
return arch_atomic64_fetch_dec_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_dec_release(atomic_long_t *v)
{
return arch_atomic64_fetch_dec_release(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v)
{
return arch_atomic64_fetch_dec_relaxed(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch_atomic_long_and(long i, atomic_long_t *v)
{
arch_atomic64_and(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_and(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_and(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_and_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_and_release(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_and_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_and_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch_atomic_long_andnot(long i, atomic_long_t *v)
{
arch_atomic64_andnot(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_andnot(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_andnot(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_andnot_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_andnot_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_andnot_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch_atomic_long_or(long i, atomic_long_t *v)
{
arch_atomic64_or(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_or(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_or(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_or_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_or_release(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_or_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_or_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
arch_atomic_long_xor(long i, atomic_long_t *v)
{
arch_atomic64_xor(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_xor(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_xor(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_xor_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_xor_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
{
return arch_atomic64_fetch_xor_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_xchg(atomic_long_t *v, long i)
{
return arch_atomic64_xchg(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_xchg_acquire(atomic_long_t *v, long i)
{
return arch_atomic64_xchg_acquire(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_xchg_release(atomic_long_t *v, long i)
{
return arch_atomic64_xchg_release(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i)
{
return arch_atomic64_xchg_relaxed(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
{
return arch_atomic64_cmpxchg(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
{
return arch_atomic64_cmpxchg_acquire(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
{
return arch_atomic64_cmpxchg_release(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
{
return arch_atomic64_cmpxchg_relaxed(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
{
return arch_atomic64_try_cmpxchg(v, (s64 *)old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
{
return arch_atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
{
return arch_atomic64_try_cmpxchg_release(v, (s64 *)old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
{
return arch_atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_long_sub_and_test(long i, atomic_long_t *v)
{
return arch_atomic64_sub_and_test(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_long_dec_and_test(atomic_long_t *v)
{
return arch_atomic64_dec_and_test(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_long_inc_and_test(atomic_long_t *v)
{
return arch_atomic64_inc_and_test(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_long_add_negative(long i, atomic_long_t *v)
{
return arch_atomic64_add_negative(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
{
return arch_atomic64_fetch_add_unless(v, a, u);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_long_add_unless(atomic_long_t *v, long a, long u)
{
return arch_atomic64_add_unless(v, a, u);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_long_inc_not_zero(atomic_long_t *v)
{
return arch_atomic64_inc_not_zero(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_long_inc_unless_negative(atomic_long_t *v)
{
return arch_atomic64_inc_unless_negative(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
arch_atomic_long_dec_unless_positive(atomic_long_t *v)
{
return arch_atomic64_dec_unless_positive(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
arch_atomic_long_dec_if_positive(atomic_long_t *v)
{
return arch_atomic64_sub_if_positive(v, 1);
}
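/*
 * Editor's note: dec_if_positive appears here as
 * arch_atomic64_sub_if_positive(v, 1) because riscv defines
 * arch_atomic64_dec_if_positive() as a macro over its sub_if_positive
 * primitive (the LR/SC function at the top of this excerpt), so the
 * preprocessor has already rewritten the call.
 */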
# 82 "./include/linux/atomic.h" 2
# 1 "./include/linux/atomic/atomic-instrumented.h" 1
# 22 "./include/linux/atomic/atomic-instrumented.h"
# 1 "./include/linux/instrumented.h" 1
# 24 "./include/linux/instrumented.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void instrument_read(const volatile void *v, size_t size)
{
kasan_check_read(v, size);
kcsan_check_access(v, size, 0);
}
# 39 "./include/linux/instrumented.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void instrument_write(const volatile void *v, size_t size)
{
kasan_check_write(v, size);
kcsan_check_access(v, size, (1 << 0));
}
# 54 "./include/linux/instrumented.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void instrument_read_write(const volatile void *v, size_t size)
{
kasan_check_write(v, size);
kcsan_check_access(v, size, (1 << 1) | (1 << 0));
}
# 69 "./include/linux/instrumented.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void instrument_atomic_read(const volatile void *v, size_t size)
{
kasan_check_read(v, size);
kcsan_check_access(v, size, (1 << 2));
}
# 84 "./include/linux/instrumented.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void instrument_atomic_write(const volatile void *v, size_t size)
{
kasan_check_write(v, size);
kcsan_check_access(v, size, (1 << 2) | (1 << 0));
}
# 99 "./include/linux/instrumented.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void instrument_atomic_read_write(const volatile void *v, size_t size)
{
kasan_check_write(v, size);
kcsan_check_access(v, size, (1 << 2) | (1 << 0) | (1 << 1));
}
# 115 "./include/linux/instrumented.h"
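/*
 * The usercopy hooks instrument only the kernel-side buffer: the source
 * for copy_to_user() (checked as a read) and the destination for
 * copy_from_user() (checked as a write).  The userspace pointer is not
 * checked here.
 */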
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
instrument_copy_to_user(void *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
kcsan_check_access(from, n, 0);
}
# 132 "./include/linux/instrumented.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
instrument_copy_from_user(const void *to, const void *from, unsigned long n)
{
kasan_check_write(to, n);
kcsan_check_access(to, n, (1 << 0));
}
# 23 "./include/linux/atomic/atomic-instrumented.h" 2
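/*
 * Instrumented atomic_t wrappers from atomic-instrumented.h: instrument
 * the access, then call down to the arch_atomic_*() primitive.  The
 * empty "do { } while (0);" statements in the fully-ordered and
 * _release variants look like the no-op expansion of
 * kcsan_mb()/kcsan_release() with CONFIG_KCSAN disabled.
 */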

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_read(const atomic_t *v)
{
instrument_atomic_read(v, sizeof(*v));
return arch_atomic_read(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_read_acquire(const atomic_t *v)
{
instrument_atomic_read(v, sizeof(*v));
return arch_atomic_read_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_set(atomic_t *v, int i)
{
instrument_atomic_write(v, sizeof(*v));
arch_atomic_set(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_set_release(atomic_t *v, int i)
{
do { } while (0);
instrument_atomic_write(v, sizeof(*v));
arch_atomic_set_release(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_add(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_add(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_add_return(int i, atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_add_return_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_add_return_release(int i, atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_add_return_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_add(int i, atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_add_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_add_release(int i, atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_add_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_sub(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_sub(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_sub_return(int i, atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_sub_return_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_sub_return_release(int i, atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_sub_return_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_sub(int i, atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_sub_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_sub_release(int i, atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_inc(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_inc(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_inc_return(atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_inc_return_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_inc_return_release(atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return_release(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_inc_return_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return_relaxed(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_inc(atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_inc_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_inc_release(atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc_release(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_inc_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc_relaxed(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_dec(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_dec(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_dec_return(atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_dec_return_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_dec_return_release(atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return_release(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_dec_return_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return_relaxed(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_dec(atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_dec_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_dec_release(atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec_release(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_dec_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec_relaxed(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_and(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_and(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_and(int i, atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_and_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_and_release(int i, atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_and_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_andnot(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_andnot(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_andnot(int i, atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_or(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_or(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_or(int i, atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_or_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_or_release(int i, atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_or_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_xor(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_xor(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_xor(int i, atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_xor_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_xor_release(int i, atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_xor_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_xchg(atomic_t *v, int i)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_xchg_acquire(atomic_t *v, int i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg_acquire(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_xchg_release(atomic_t *v, int i)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg_release(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_xchg_relaxed(atomic_t *v, int i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg_relaxed(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg_acquire(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg_release(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg_relaxed(v, old, new);
}
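/*
 * try_cmpxchg() writes the observed value back through *old on failure,
 * so both v and old are instrumented as read-write accesses.
 */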

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_acquire(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_release(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_relaxed(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_sub_and_test(int i, atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_and_test(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_dec_and_test(atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_and_test(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_inc_and_test(atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_and_test(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_add_negative(int i, atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_negative(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_unless(v, a, u);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_add_unless(atomic_t *v, int a, int u)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_unless(v, a, u);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_inc_not_zero(atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_not_zero(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_inc_unless_negative(atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_unless_negative(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_dec_unless_positive(atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_unless_positive(v);
}
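/*
 * There is apparently no arch_atomic_dec_if_positive() on this target:
 * RISC-V exposes arch_atomic_sub_if_positive(), and dec_if_positive()
 * is simply sub_if_positive(v, 1).
 */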

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
atomic_dec_if_positive(atomic_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_if_positive(v, 1);
}
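/*
 * Same instrumented wrapper pattern for atomic64_t, operating on s64
 * values.
 */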

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_read(const atomic64_t *v)
{
instrument_atomic_read(v, sizeof(*v));
return arch_atomic64_read(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_read_acquire(const atomic64_t *v)
{
instrument_atomic_read(v, sizeof(*v));
return arch_atomic64_read_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic64_set(atomic64_t *v, s64 i)
{
instrument_atomic_write(v, sizeof(*v));
arch_atomic64_set(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic64_set_release(atomic64_t *v, s64 i)
{
do { } while (0);
instrument_atomic_write(v, sizeof(*v));
arch_atomic64_set_release(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic64_add(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_add(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_add_return(s64 i, atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_add_return_release(s64 i, atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_add_return_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_add(s64 i, atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic64_sub(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_sub(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_sub_return(s64 i, atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_sub_return_release(s64 i, atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_sub(s64 i, atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic64_inc(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_inc(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_inc_return(atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_inc_return_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_inc_return_release(atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return_release(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_inc_return_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return_relaxed(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_inc(atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_inc_release(atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_release(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_inc_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_relaxed(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic64_dec(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_dec(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_dec_return(atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_dec_return_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_dec_return_release(atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return_release(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_dec_return_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return_relaxed(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_dec(atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_dec_release(atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_release(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_dec_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_relaxed(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic64_and(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_and(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_and(s64 i, atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic64_andnot(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_andnot(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic64_or(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_or(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_or(s64 i, atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic64_xor(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_xor(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_xor(s64 i, atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_xchg(atomic64_t *v, s64 i)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg(v, i);
}
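/*
 * Note: the _acquire/_release/_relaxed variants of atomic64_xchg() and
 * atomic64_cmpxchg() below all call the full-barrier
 * arch_atomic64_xchg()/arch_atomic64_cmpxchg().  This is apparently the
 * atomic-arch-fallback mapping: when an arch defines only the
 * fully-ordered primitive, the weaker orderings are #defined to it,
 * which is correct, if conservative.
 */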

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_xchg_acquire(atomic64_t *v, s64 i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_xchg_release(atomic64_t *v, s64 i)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_xchg_relaxed(atomic64_t *v, s64 i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_acquire(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_release(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic64_sub_and_test(s64 i, atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_and_test(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic64_dec_and_test(atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_and_test(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic64_inc_and_test(atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_and_test(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic64_add_negative(s64 i, atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_negative(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_unless(v, a, u);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_unless(v, a, u);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic64_inc_not_zero(atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_not_zero(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic64_inc_unless_negative(atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_unless_negative(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic64_dec_unless_positive(atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_unless_positive(v);
}
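/* Same sub_if_positive(v, 1) mapping as the 32-bit variant above. */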

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) s64
atomic64_dec_if_positive(atomic64_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_if_positive(v, 1);
}
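/*
 * Instrumented atomic_long_t wrappers: the same shape once more, this
 * time delegating to the arch_atomic_long_*() forwards defined earlier,
 * which on riscv64/lp64 resolve to the atomic64 operations.
 */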

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_read(const atomic_long_t *v)
{
instrument_atomic_read(v, sizeof(*v));
return arch_atomic_long_read(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_read_acquire(const atomic_long_t *v)
{
instrument_atomic_read(v, sizeof(*v));
return arch_atomic_long_read_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_long_set(atomic_long_t *v, long i)
{
instrument_atomic_write(v, sizeof(*v));
arch_atomic_long_set(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_long_set_release(atomic_long_t *v, long i)
{
do { } while (0);
instrument_atomic_write(v, sizeof(*v));
arch_atomic_long_set_release(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_long_add(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_long_add(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_add_return(long i, atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_add_return(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_add_return_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_add_return_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_add_return_release(long i, atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_add_return_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_add_return_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_add_return_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_add(long i, atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_add(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_add_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_add_release(long i, atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_add_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_add_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_long_sub(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_long_sub(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_sub_return(long i, atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_sub_return(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_sub_return_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_sub_return_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_sub_return_release(long i, atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_sub_return_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_sub_return_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_sub(long i, atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_sub(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_sub_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_sub_release(long i, atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_sub_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_sub_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_long_inc(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_long_inc(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_inc_return(atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_inc_return(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_inc_return_acquire(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_inc_return_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_inc_return_release(atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_inc_return_release(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_inc_return_relaxed(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_inc_return_relaxed(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_inc(atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_inc(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_inc_acquire(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_inc_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_inc_release(atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_inc_release(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_inc_relaxed(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_inc_relaxed(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_long_dec(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_long_dec(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_dec_return(atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_dec_return(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_dec_return_acquire(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_dec_return_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_dec_return_release(atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_dec_return_release(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_dec_return_relaxed(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_dec_return_relaxed(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_dec(atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_dec(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_dec_acquire(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_dec_acquire(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_dec_release(atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_dec_release(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_dec_relaxed(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_dec_relaxed(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_long_and(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_long_and(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_and(long i, atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_and(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_and_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_and_release(long i, atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_and_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_and_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_long_andnot(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_long_andnot(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_andnot(long i, atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_andnot(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_andnot_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_andnot_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_andnot_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_long_or(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_long_or(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_or(long i, atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_or(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_or_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_or_release(long i, atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_or_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_or_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
atomic_long_xor(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_long_xor(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_xor(long i, atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_xor(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_xor_acquire(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_xor_release(long i, atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_xor_release(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_xor_relaxed(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_xchg(atomic_long_t *v, long i)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_xchg(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_xchg_acquire(atomic_long_t *v, long i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_xchg_acquire(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_xchg_release(atomic_long_t *v, long i)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_xchg_release(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_xchg_relaxed(atomic_long_t *v, long i)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_xchg_relaxed(v, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_cmpxchg(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_cmpxchg_acquire(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_cmpxchg_release(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_cmpxchg_relaxed(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_long_try_cmpxchg(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_long_try_cmpxchg_acquire(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_long_try_cmpxchg_release(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_long_try_cmpxchg_relaxed(v, old, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_long_sub_and_test(long i, atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_sub_and_test(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_long_dec_and_test(atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_dec_and_test(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_long_inc_and_test(atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_inc_and_test(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_long_add_negative(long i, atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_add_negative(i, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_fetch_add_unless(v, a, u);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_long_add_unless(atomic_long_t *v, long a, long u)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_add_unless(v, a, u);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_long_inc_not_zero(atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_inc_not_zero(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_long_inc_unless_negative(atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_inc_unless_negative(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
atomic_long_dec_unless_positive(atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_dec_unless_positive(v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
atomic_long_dec_if_positive(atomic_long_t *v)
{
do { } while (0);
instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_long_dec_if_positive(v);
}
# 83 "./include/linux/atomic.h" 2
# 14 "./include/linux/mm_types_task.h" 2
# 1 "./include/linux/cpumask.h" 1
# 12 "./include/linux/cpumask.h"
# 1 "./include/linux/bitmap.h" 1








# 1 "./include/linux/find.h" 1
# 11 "./include/linux/find.h"
extern unsigned long _find_next_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long nbits,
unsigned long start, unsigned long invert, unsigned long le);
extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
extern unsigned long _find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size);
extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
# 30 "./include/linux/find.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
if ((__builtin_constant_p(size) && (size) <= 64 && (size) > 0)) {
unsigned long val;

if (__builtin_expect(!!(offset >= size), 0))
return size;

val = *addr & ((((int)(sizeof(struct { int:(-!!(__builtin_choose_expr( (sizeof(int) == sizeof(*(8 ? ((void *)((long)((offset) > (size - 1)) * 0l)) : (int *)8))), (offset) > (size - 1), 0))); })))) + (((~(((0UL)))) - ((((1UL))) << (offset)) + 1) & (~(((0UL))) >> (64 - 1 - (size - 1)))));
return val ? __ffs(val) : size;
}

return _find_next_bit(addr, ((void *)0), size, offset, 0UL, 0);
}
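
/*
 * In find_next_bit() above and the related helpers below, the large constant
 * expression is the expansion of GENMASK(size - 1, offset): the
 * "sizeof(struct { int:(-!!(...)); })" part is a compile-time assertion that
 * the bit range is valid (a negative bitfield width forces a build error and
 * the whole term otherwise folds to zero), while the remainder computes the
 * contiguous bit mask between offset and size - 1.
 */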
# 59 "./include/linux/find.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
unsigned long find_next_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size,
unsigned long offset)
{
if ((__builtin_constant_p(size) && (size) <= 64 && (size) > 0)) {
unsigned long val;

if (__builtin_expect(!!(offset >= size), 0))
return size;

val = *addr1 & *addr2 & ((((int)(sizeof(struct { int:(-!!(__builtin_choose_expr( (sizeof(int) == sizeof(*(8 ? ((void *)((long)((offset) > (size - 1)) * 0l)) : (int *)8))), (offset) > (size - 1), 0))); })))) + (((~(((0UL)))) - ((((1UL))) << (offset)) + 1) & (~(((0UL))) >> (64 - 1 - (size - 1)))));
return val ? __ffs(val) : size;
}

return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
}
# 88 "./include/linux/find.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
if ((__builtin_constant_p(size) && (size) <= 64 && (size) > 0)) {
unsigned long val;

if (__builtin_expect(!!(offset >= size), 0))
return size;

val = *addr | ~((((int)(sizeof(struct { int:(-!!(__builtin_choose_expr( (sizeof(int) == sizeof(*(8 ? ((void *)((long)((offset) > (size - 1)) * 0l)) : (int *)8))), (offset) > (size - 1), 0))); })))) + (((~(((0UL)))) - ((((1UL))) << (offset)) + 1) & (~(((0UL))) >> (64 - 1 - (size - 1)))));
return val == ~0UL ? size : __ffs(~(val));
}

return _find_next_bit(addr, ((void *)0), size, offset, ~0UL, 0);
}
# 115 "./include/linux/find.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
if ((__builtin_constant_p(size) && (size) <= 64 && (size) > 0)) {
unsigned long val = *addr & ((((int)(sizeof(struct { int:(-!!(__builtin_choose_expr( (sizeof(int) == sizeof(*(8 ? ((void *)((long)((0) > (size - 1)) * 0l)) : (int *)8))), (0) > (size - 1), 0))); })))) + (((~(((0UL)))) - ((((1UL))) << (0)) + 1) & (~(((0UL))) >> (64 - 1 - (size - 1)))));

return val ? __ffs(val) : size;
}

return _find_first_bit(addr, size);
}
# 138 "./include/linux/find.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
unsigned long find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size)
{
if ((__builtin_constant_p(size) && (size) <= 64 && (size) > 0)) {
unsigned long val = *addr1 & *addr2 & ((((int)(sizeof(struct { int:(-!!(__builtin_choose_expr( (sizeof(int) == sizeof(*(8 ? ((void *)((long)((0) > (size - 1)) * 0l)) : (int *)8))), (0) > (size - 1), 0))); })))) + (((~(((0UL)))) - ((((1UL))) << (0)) + 1) & (~(((0UL))) >> (64 - 1 - (size - 1)))));

return val ? __ffs(val) : size;
}

return _find_first_and_bit(addr1, addr2, size);
}
# 162 "./include/linux/find.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
if ((__builtin_constant_p(size) && (size) <= 64 && (size) > 0)) {
unsigned long val = *addr | ~((((int)(sizeof(struct { int:(-!!(__builtin_choose_expr( (sizeof(int) == sizeof(*(8 ? ((void *)((long)((0) > (size - 1)) * 0l)) : (int *)8))), (0) > (size - 1), 0))); })))) + (((~(((0UL)))) - ((((1UL))) << (0)) + 1) & (~(((0UL))) >> (64 - 1 - (size - 1)))));

return val == ~0UL ? size : __ffs(~(val));
}

return _find_first_zero_bit(addr, size);
}
# 183 "./include/linux/find.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
{
if ((__builtin_constant_p(size) && (size) <= 64 && (size) > 0)) {
unsigned long val = *addr & ((((int)(sizeof(struct { int:(-!!(__builtin_choose_expr( (sizeof(int) == sizeof(*(8 ? ((void *)((long)((0) > (size - 1)) * 0l)) : (int *)8))), (0) > (size - 1), 0))); })))) + (((~(((0UL)))) - ((((1UL))) << (0)) + 1) & (~(((0UL))) >> (64 - 1 - (size - 1)))));

return val ? __fls(val) : size;
}

return _find_last_bit(addr, size);
}
# 206 "./include/linux/find.h"
extern unsigned long find_next_clump8(unsigned long *clump,
const unsigned long *addr,
unsigned long size, unsigned long offset);






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long find_next_zero_bit_le(const void *addr,
unsigned long size, unsigned long offset)
{
return find_next_zero_bit(addr, size, offset);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long find_next_bit_le(const void *addr,
unsigned long size, unsigned long offset)
{
return find_next_bit(addr, size, offset);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long find_first_zero_bit_le(const void *addr,
unsigned long size)
{
return find_first_zero_bit(addr, size);
}
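
/*
 * On a little-endian target such as riscv64, the *_le() variants are plain
 * pass-throughs to the native bit-search helpers, as seen above.
 */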
# 10 "./include/linux/bitmap.h" 2

# 1 "./include/linux/string.h" 1
# 10 "./include/linux/string.h"
# 1 "./include/uapi/linux/string.h" 1
# 11 "./include/linux/string.h" 2

extern char *strndup_user(const char *, long);
extern void *memdup_user(const void *, size_t);
extern void *vmemdup_user(const void *, size_t);
extern void *memdup_user_nul(const void *, size_t);





# 1 "./arch/riscv/include/asm/string.h" 1
# 13 "./arch/riscv/include/asm/string.h"
extern void *memset(void *, int, size_t);
extern void *__memset(void *, int, size_t);

extern void *memcpy(void *, const void *, size_t);
extern void *__memcpy(void *, const void *, size_t);

extern void *memmove(void *, const void *, size_t);
extern void *__memmove(void *, const void *, size_t);
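
/*
 * arch/riscv declares both memset()/memcpy()/memmove() and the
 * double-underscore forms; the __-prefixed versions are, most likely, the
 * un-instrumented variants used where sanitizer-style checking must be
 * bypassed.
 */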
# 21 "./include/linux/string.h" 2


extern char * strcpy(char *,const char *);


extern char * strncpy(char *,const char *, __kernel_size_t);


size_t strlcpy(char *, const char *, size_t);


ssize_t strscpy(char *, const char *, size_t);



ssize_t strscpy_pad(char *dest, const char *src, size_t count);


extern char * strcat(char *, const char *);


extern char * strncat(char *, const char *, __kernel_size_t);


extern size_t strlcat(char *, const char *, __kernel_size_t);


extern int strcmp(const char *,const char *);


extern int strncmp(const char *,const char *,__kernel_size_t);


extern int strcasecmp(const char *s1, const char *s2);


extern int strncasecmp(const char *s1, const char *s2, size_t n);


extern char * strchr(const char *,int);


extern char * strchrnul(const char *,int);

extern char * strnchrnul(const char *, size_t, int);

extern char * strnchr(const char *, size_t, int);


extern char * strrchr(const char *,int);

extern char * __attribute__((__warn_unused_result__)) skip_spaces(const char *);

extern char *strim(char *);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__)) char *strstrip(char *str)
{
return strim(str);
}


extern char * strstr(const char *, const char *);


extern char * strnstr(const char *, const char *, size_t);


extern __kernel_size_t strlen(const char *);


extern __kernel_size_t strnlen(const char *,__kernel_size_t);


extern char * strpbrk(const char *,const char *);


extern char * strsep(char **,const char *);


extern __kernel_size_t strspn(const char *,const char *);


extern __kernel_size_t strcspn(const char *,const char *);







extern void *memset16(uint16_t *, uint16_t, __kernel_size_t);



extern void *memset32(uint32_t *, uint32_t, __kernel_size_t);



extern void *memset64(uint64_t *, uint64_t, __kernel_size_t);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *memset_l(unsigned long *p, unsigned long v,
__kernel_size_t n)
{
if (64 == 32)
return memset32((uint32_t *)p, v, n);
else
return memset64((uint64_t *)p, v, n);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *memset_p(void **p, void *v, __kernel_size_t n)
{
if (64 == 32)
return memset32((uint32_t *)p, (uintptr_t)v, n);
else
return memset64((uint64_t *)p, (uintptr_t)v, n);
}
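
/*
 * The "if (64 == 32)" tests above are the constant-folded expansion of
 * BITS_PER_LONG == 32, so on this rv64 build memset_l()/memset_p() always
 * take the memset64() path.
 */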

extern void **__memcat_p(void **a, void **b);
# 153 "./include/linux/string.h"
extern void * memscan(void *,int,__kernel_size_t);


extern int memcmp(const void *,const void *,__kernel_size_t);


extern int bcmp(const void *,const void *,__kernel_size_t);


extern void * memchr(const void *,int,__kernel_size_t);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
memcpy(dst, src, cnt);
}


void *memchr_inv(const void *s, int c, size_t n);
char *strreplace(char *s, char old, char new);

extern void kfree_const(const void *x);

extern char *kstrdup(const char *s, gfp_t gfp) __attribute__((__malloc__));
extern const char *kstrdup_const(const char *s, gfp_t gfp);
extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp);

extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
extern void argv_free(char **argv);

extern bool sysfs_streq(const char *s1, const char *s2);
int match_string(const char * const *array, size_t n, const char *string);
int __sysfs_match_string(const char * const *array, size_t n, const char *s);
# 199 "./include/linux/string.h"
int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __attribute__((__format__(printf, 3, 4)));


extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
const void *from, size_t available);

int ptr_to_hashval(const void *ptr, unsigned long *hashval_out);






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool strstarts(const char *str, const char *prefix)
{
return strncmp(str, prefix, strlen(prefix)) == 0;
}

size_t memweight(const void *ptr, size_t bytes);
# 235 "./include/linux/string.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memzero_explicit(void *s, size_t count)
{
memset(s, 0, count);
__asm__ __volatile__("": :"r"(s) :"memory");
}
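
/*
 * The empty asm with a "memory" clobber acts as a compiler barrier so the
 * preceding memset() cannot be eliminated as a dead store; this is what
 * distinguishes memzero_explicit() from a plain memset().
 */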






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *kbasename(const char *path)
{
const char *tail = strrchr(path, '/');
return tail ? tail + 1 : path;
}





void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
int pad);
# 309 "./include/linux/string.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) size_t str_has_prefix(const char *str, const char *prefix)
{
size_t len = strlen(prefix);
return strncmp(str, prefix, len) == 0 ? len : 0;
}
# 12 "./include/linux/bitmap.h" 2


struct device;
# 119 "./include/linux/bitmap.h"
unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node);
unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node);
void bitmap_free(const unsigned long *bitmap);


unsigned long *devm_bitmap_alloc(struct device *dev,
unsigned int nbits, gfp_t flags);
unsigned long *devm_bitmap_zalloc(struct device *dev,
unsigned int nbits, gfp_t flags);





int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
bool __attribute__((__pure__)) __bitmap_or_equal(const unsigned long *src1,
const unsigned long *src2,
const unsigned long *src3,
unsigned int nbits);
void __bitmap_complement(unsigned long *dst, const unsigned long *src,
unsigned int nbits);
void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
void bitmap_cut(unsigned long *dst, const unsigned long *src,
unsigned int first, unsigned int cut, unsigned int nbits);
int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_replace(unsigned long *dst,
const unsigned long *old, const unsigned long *new,
const unsigned long *mask, unsigned int nbits);
int __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
int __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
void __bitmap_set(unsigned long *map, unsigned int start, int len);
void __bitmap_clear(unsigned long *map, unsigned int start, int len);

unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
unsigned long align_mask,
unsigned long align_offset);
# 187 "./include/linux/bitmap.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long
bitmap_find_next_zero_area(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
unsigned long align_mask)
{
return bitmap_find_next_zero_area_off(map, size, start, nr,
align_mask, 0);
}

int bitmap_parse(const char *buf, unsigned int buflen,
unsigned long *dst, int nbits);
int bitmap_parse_user(const char *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
int bitmap_parselist(const char *buf, unsigned long *maskp,
int nmaskbits);
int bitmap_parselist_user(const char *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, unsigned int nbits);
int bitmap_bitremap(int oldbit,
const unsigned long *old, const unsigned long *new, int bits);
void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, unsigned int bits);
void bitmap_fold(unsigned long *dst, const unsigned long *orig,
unsigned int sz, unsigned int nbits);
int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);






unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits);
int bitmap_print_to_pagebuf(bool list, char *buf,
const unsigned long *maskp, int nmaskbits);

extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
int nmaskbits, loff_t off, size_t count);

extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
int nmaskbits, loff_t off, size_t count);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
unsigned int len = (((nbits) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8))) * sizeof(unsigned long);
memset(dst, 0, len);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
unsigned int len = (((nbits) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8))) * sizeof(unsigned long);
memset(dst, 0xff, len);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bitmap_copy(unsigned long *dst, const unsigned long *src,
unsigned int nbits)
{
unsigned int len = (((nbits) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8))) * sizeof(unsigned long);
memcpy(dst, src, len);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bitmap_copy_clear_tail(unsigned long *dst,
const unsigned long *src, unsigned int nbits)
{
bitmap_copy(dst, src, nbits);
if (nbits % 64)
dst[nbits / 64] &= (~0UL >> (-(nbits) & (64 - 1)));
}
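
/*
 * Throughout this header, "(~0UL >> (-(nbits) & (64 - 1)))" appears to be
 * the expansion of BITMAP_LAST_WORD_MASK(nbits): a mask covering the valid
 * bits of the final 64-bit word of the bitmap.
 */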






void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
unsigned int nbits);
void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
unsigned int nbits);
# 284 "./include/linux/bitmap.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
return (*dst = *src1 & *src2 & (~0UL >> (-(nbits) & (64 - 1)))) != 0;
return __bitmap_and(dst, src1, src2, nbits);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bitmap_or(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
*dst = *src1 | *src2;
else
__bitmap_or(dst, src1, src2, nbits);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bitmap_xor(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
*dst = *src1 ^ *src2;
else
__bitmap_xor(dst, src1, src2, nbits);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
return (*dst = *src1 & ~(*src2) & (~0UL >> (-(nbits) & (64 - 1)))) != 0;
return __bitmap_andnot(dst, src1, src2, nbits);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bitmap_complement(unsigned long *dst, const unsigned long *src,
unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
*dst = ~(*src);
else
__bitmap_complement(dst, src, nbits);
}
# 334 "./include/linux/bitmap.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int bitmap_equal(const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
return !((*src1 ^ *src2) & (~0UL >> (-(nbits) & (64 - 1))));
if (__builtin_constant_p(nbits & (8 - 1)) &&
(((nbits) & ((typeof(nbits))(8) - 1)) == 0))
return !memcmp(src1, src2, nbits / 8);
return __bitmap_equal(src1, src2, nbits);
}
# 354 "./include/linux/bitmap.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bitmap_or_equal(const unsigned long *src1,
const unsigned long *src2,
const unsigned long *src3,
unsigned int nbits)
{
if (!(__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
return __bitmap_or_equal(src1, src2, src3, nbits);

return !(((*src1 | *src2) ^ *src3) & (~0UL >> (-(nbits) & (64 - 1))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int bitmap_intersects(const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
return ((*src1 & *src2) & (~0UL >> (-(nbits) & (64 - 1)))) != 0;
else
return __bitmap_intersects(src1, src2, nbits);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int bitmap_subset(const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
return ! ((*src1 & ~(*src2)) & (~0UL >> (-(nbits) & (64 - 1))));
else
return __bitmap_subset(src1, src2, nbits);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bitmap_empty(const unsigned long *src, unsigned nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
return ! (*src & (~0UL >> (-(nbits) & (64 - 1))));

return find_first_bit(src, nbits) == nbits;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bitmap_full(const unsigned long *src, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
return ! (~(*src) & (~0UL >> (-(nbits) & (64 - 1))));

return find_first_zero_bit(src, nbits) == nbits;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
return hweight_long(*src & (~0UL >> (-(nbits) & (64 - 1))));
return __bitmap_weight(src, nbits);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void bitmap_set(unsigned long *map, unsigned int start,
unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
arch___set_bit(start, map);
else if (__builtin_constant_p(start & (8 - 1)) &&
(((start) & ((typeof(start))(8) - 1)) == 0) &&
__builtin_constant_p(nbits & (8 - 1)) &&
(((nbits) & ((typeof(nbits))(8) - 1)) == 0))
memset((char *)map + start / 8, 0xff, nbits / 8);
else
__bitmap_set(map, start, nbits);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void bitmap_clear(unsigned long *map, unsigned int start,
unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
arch___clear_bit(start, map);
else if (__builtin_constant_p(start & (8 - 1)) &&
(((start) & ((typeof(start))(8) - 1)) == 0) &&
__builtin_constant_p(nbits & (8 - 1)) &&
(((nbits) & ((typeof(nbits))(8) - 1)) == 0))
memset((char *)map + start / 8, 0, nbits / 8);
else
__bitmap_clear(map, start, nbits);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
*dst = (*src & (~0UL >> (-(nbits) & (64 - 1)))) >> shift;
else
__bitmap_shift_right(dst, src, shift, nbits);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
*dst = (*src << shift) & (~0UL >> (-(nbits) & (64 - 1)));
else
__bitmap_shift_left(dst, src, shift, nbits);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bitmap_replace(unsigned long *dst,
const unsigned long *old,
const unsigned long *new,
const unsigned long *mask,
unsigned int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 64 && (nbits) > 0))
*dst = (*old & ~(*mask)) | (*new & *mask);
else
__bitmap_replace(dst, old, new, mask, nbits);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bitmap_next_set_region(unsigned long *bitmap,
unsigned int *rs, unsigned int *re,
unsigned int end)
{
*rs = find_next_bit(bitmap, end, *rs);
*re = find_next_zero_bit(bitmap, end, *rs + 1);
}
# 515 "./include/linux/bitmap.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bitmap_from_u64(unsigned long *dst, u64 mask)
{
dst[0] = mask & (~0UL);

if (sizeof(mask) > sizeof(unsigned long))
dst[1] = mask >> 32;
}
# 531 "./include/linux/bitmap.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long bitmap_get_value8(const unsigned long *map,
unsigned long start)
{
const size_t index = ((start) / 64);
const unsigned long offset = start % 64;

return (map[index] >> offset) & 0xFF;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bitmap_set_value8(unsigned long *map, unsigned long value,
unsigned long start)
{
const size_t index = ((start) / 64);
const unsigned long offset = start % 64;

map[index] &= ~(0xFFUL << offset);
map[index] |= value << offset;
}
# 13 "./include/linux/cpumask.h" 2




typedef struct cpumask { unsigned long bits[(((32) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))]; } cpumask_t;
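
/*
 * The constant 32 here, and "((unsigned int)32)" in the helpers below,
 * corresponds to NR_CPUS in this configuration; the array length is the
 * usual BITS_TO_LONGS(NR_CPUS) round-up.
 */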
# 39 "./include/linux/cpumask.h"
extern unsigned int nr_cpu_ids;
# 90 "./include/linux/cpumask.h"
extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
extern struct cpumask __cpu_dying_mask;






extern atomic_t __num_online_cpus;

extern cpumask_t cpus_booted_once_mask;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{

({ int __ret_warn_on = !!(cpu >= bits); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/cpumask.h"), "i" (108), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });

}
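
/*
 * cpu_max_bits_warn() appears to be WARN_ON(cpu >= bits) after expansion:
 * the inline asm plants an "ebreak" plus a __bug_table entry, the riscv
 * mechanism for reporting a warning at this call site (file
 * "include/linux/cpumask.h", line 108, per the encoded operands).
 */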


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned int cpumask_check(unsigned int cpu)
{
cpu_max_bits_warn(cpu, ((unsigned int)32));
return cpu;
}
# 204 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int cpumask_first(const struct cpumask *srcp)
{
return find_first_bit(((srcp)->bits), ((unsigned int)32));
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
return find_first_zero_bit(((srcp)->bits), ((unsigned int)32));
}
# 227 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
return find_first_and_bit(((srcp1)->bits), ((srcp2)->bits), ((unsigned int)32));
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int cpumask_last(const struct cpumask *srcp)
{
return find_last_bit(((srcp)->bits), ((unsigned int)32));
}

unsigned int __attribute__((__pure__)) cpumask_next(int n, const struct cpumask *srcp);
# 253 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{

if (n != -1)
cpumask_check(n);
return find_next_zero_bit(((srcp)->bits), ((unsigned int)32), n+1);
}

int __attribute__((__pure__)) cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
int __attribute__((__pure__)) cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
unsigned int cpumask_local_spread(unsigned int i, int node);
int cpumask_any_and_distribute(const struct cpumask *src1p,
const struct cpumask *src2p);
int cpumask_any_distribute(const struct cpumask *srcp);
# 292 "./include/linux/cpumask.h"
extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
# 344 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
set_bit(cpumask_check(cpu), ((dstp)->bits));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
arch___set_bit(cpumask_check(cpu), ((dstp)->bits));
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
clear_bit(cpumask_check(cpu), ((dstp)->bits));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
arch___clear_bit(cpumask_check(cpu), ((dstp)->bits));
}
# 377 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
return arch_test_bit(cpumask_check(cpu), (((cpumask))->bits));
}
# 391 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_set_bit(cpumask_check(cpu), ((cpumask)->bits));
}
# 405 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_clear_bit(cpumask_check(cpu), ((cpumask)->bits));
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpumask_setall(struct cpumask *dstp)
{
bitmap_fill(((dstp)->bits), ((unsigned int)32));
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpumask_clear(struct cpumask *dstp)
{
bitmap_zero(((dstp)->bits), ((unsigned int)32));
}
# 436 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpumask_and(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_and(((dstp)->bits), ((src1p)->bits),
((src2p)->bits), ((unsigned int)32));
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
const struct cpumask *src2p)
{
bitmap_or(((dstp)->bits), ((src1p)->bits),
((src2p)->bits), ((unsigned int)32));
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpumask_xor(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
bitmap_xor(((dstp)->bits), ((src1p)->bits),
((src2p)->bits), ((unsigned int)32));
}
# 479 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpumask_andnot(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_andnot(((dstp)->bits), ((src1p)->bits),
((src2p)->bits), ((unsigned int)32));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpumask_complement(struct cpumask *dstp,
const struct cpumask *srcp)
{
bitmap_complement(((dstp)->bits), ((srcp)->bits),
((unsigned int)32));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cpumask_equal(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_equal(((src1p)->bits), ((src2p)->bits),
((unsigned int)32));
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cpumask_or_equal(const struct cpumask *src1p,
const struct cpumask *src2p,
const struct cpumask *src3p)
{
return bitmap_or_equal(((src1p)->bits), ((src2p)->bits),
((src3p)->bits), ((unsigned int)32));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cpumask_intersects(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_intersects(((src1p)->bits), ((src2p)->bits),
((unsigned int)32));
}
# 544 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpumask_subset(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_subset(((src1p)->bits), ((src2p)->bits),
((unsigned int)32));
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cpumask_empty(const struct cpumask *srcp)
{
return bitmap_empty(((srcp)->bits), ((unsigned int)32));
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cpumask_full(const struct cpumask *srcp)
{
return bitmap_full(((srcp)->bits), ((unsigned int)32));
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int cpumask_weight(const struct cpumask *srcp)
{
return bitmap_weight(((srcp)->bits), ((unsigned int)32));
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpumask_shift_right(struct cpumask *dstp,
const struct cpumask *srcp, int n)
{
bitmap_shift_right(((dstp)->bits), ((srcp)->bits), n,
((unsigned int)32));
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpumask_shift_left(struct cpumask *dstp,
const struct cpumask *srcp, int n)
{
bitmap_shift_left(((dstp)->bits), ((srcp)->bits), n,
((unsigned int)32));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpumask_copy(struct cpumask *dstp,
const struct cpumask *srcp)
{
bitmap_copy(((dstp)->bits), ((srcp)->bits), ((unsigned int)32));
}
# 646 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpumask_parse_user(const char *buf, int len,
struct cpumask *dstp)
{
return bitmap_parse_user(buf, len, ((dstp)->bits), ((unsigned int)32));
}
# 660 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpumask_parselist_user(const char *buf, int len,
struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, ((dstp)->bits),
((unsigned int)32));
}
# 674 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpumask_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parse(buf, (~0U), ((dstp)->bits), ((unsigned int)32));
}
# 686 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpulist_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parselist(buf, ((dstp)->bits), ((unsigned int)32));
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int cpumask_size(void)
{
return (((((unsigned int)32)) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8))) * sizeof(long);
}
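/* cpumask_size() is BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long): with
 * 32 bits and 64-bit longs, (32 + 64 - 1) / 64 = 1 long, so this returns
 * 8 bytes in this configuration. */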
# 759 "./include/linux/cpumask.h"
typedef struct cpumask cpumask_var_t[1];




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
cpumask_clear(*mask);
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
cpumask_clear(*mask);
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void free_cpumask_var(cpumask_var_t mask)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cpumask_available(cpumask_var_t mask)
{
return true;
}
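/* The trivial stubs above (always-true alloc_cpumask_var(), empty
 * free_cpumask_var()) together with the cpumask_var_t[1] typedef are the
 * !CONFIG_CPUMASK_OFFSTACK variant: cpumask_var_t lives on the stack and
 * no heap allocation ever happens. */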




extern const unsigned long cpu_all_bits[(((32) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))];
# 819 "./include/linux/cpumask.h"
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void reset_cpu_possible_mask(void)
{
bitmap_zero(((&__cpu_possible_mask)->bits), 32);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
set_cpu_possible(unsigned int cpu, bool possible)
{
if (possible)
cpumask_set_cpu(cpu, &__cpu_possible_mask);
else
cpumask_clear_cpu(cpu, &__cpu_possible_mask);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
set_cpu_present(unsigned int cpu, bool present)
{
if (present)
cpumask_set_cpu(cpu, &__cpu_present_mask);
else
cpumask_clear_cpu(cpu, &__cpu_present_mask);
}

void set_cpu_online(unsigned int cpu, bool online);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
set_cpu_active(unsigned int cpu, bool active)
{
if (active)
cpumask_set_cpu(cpu, &__cpu_active_mask);
else
cpumask_clear_cpu(cpu, &__cpu_active_mask);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
set_cpu_dying(unsigned int cpu, bool dying)
{
if (dying)
cpumask_set_cpu(cpu, &__cpu_dying_mask);
else
cpumask_clear_cpu(cpu, &__cpu_dying_mask);
}
# 880 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __check_is_bitmap(const unsigned long *bitmap)
{
return 1;
}
# 892 "./include/linux/cpumask.h"
extern const unsigned long
cpu_bit_bitmap[64 +1][(((32) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))];

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const struct cpumask *get_cpu_mask(unsigned int cpu)
{
const unsigned long *p = cpu_bit_bitmap[1 + cpu % 64];
p -= cpu / 64;
return ((struct cpumask *)(1 ? (p) : (void *)sizeof(__check_is_bitmap(p))));
}
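/* get_cpu_mask() uses the cpu_bit_bitmap[] trick: the table's rows overlap
 * in storage such that row (1 + cpu % 64), stepped back by cpu / 64 longs,
 * is a bitmap whose only set bit is `cpu`. The
 * (1 ? (p) : (void *)sizeof(__check_is_bitmap(p))) expression is the
 * expansion of to_cpumask(), a compile-time check that p really has type
 * unsigned long *. */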
# 911 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int num_online_cpus(void)
{
return atomic_read(&__num_online_cpus);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cpu_online(unsigned int cpu)
{
return cpumask_test_cpu(cpu, ((const struct cpumask *)&__cpu_online_mask));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cpu_possible(unsigned int cpu)
{
return cpumask_test_cpu(cpu, ((const struct cpumask *)&__cpu_possible_mask));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cpu_present(unsigned int cpu)
{
return cpumask_test_cpu(cpu, ((const struct cpumask *)&__cpu_present_mask));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cpu_active(unsigned int cpu)
{
return cpumask_test_cpu(cpu, ((const struct cpumask *)&__cpu_active_mask));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cpu_dying(unsigned int cpu)
{
return cpumask_test_cpu(cpu, ((const struct cpumask *)&__cpu_dying_mask));
}
# 1005 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
return bitmap_print_to_pagebuf(list, buf, ((mask)->bits),
nr_cpu_ids);
}
# 1028 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ssize_t
cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
loff_t off, size_t count)
{
return bitmap_print_bitmask_to_buf(buf, ((mask)->bits),
nr_cpu_ids, off, count) - 1;
}
# 1043 "./include/linux/cpumask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ssize_t
cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
loff_t off, size_t count)
{
return bitmap_print_list_to_buf(buf, ((mask)->bits),
nr_cpu_ids, off, count) - 1;
}
# 15 "./include/linux/mm_types_task.h" 2
# 34 "./include/linux/mm_types_task.h"
struct vmacache {
u64 seqnum;
struct vm_area_struct *vmas[(1U << 2)];
};





enum {
MM_FILEPAGES,
MM_ANONPAGES,
MM_SWAPENTS,
MM_SHMEMPAGES,
NR_MM_COUNTERS
};




struct task_rss_stat {
int events;
int count[NR_MM_COUNTERS];
};


struct mm_rss_stat {
atomic_long_t count[NR_MM_COUNTERS];
};

struct page_frag {
struct page *page;

__u32 offset;
__u32 size;




};


struct tlbflush_unmap_batch {
# 97 "./include/linux/mm_types_task.h"
};
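/* struct tlbflush_unmap_batch has no members here: its fields are guarded
 * by CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH, which riscv does not select,
 * so the preprocessor dropped the whole body. */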
# 6 "./include/linux/mm_types.h" 2

# 1 "./include/linux/auxvec.h" 1




# 1 "./include/uapi/linux/auxvec.h" 1




# 1 "./arch/riscv/include/uapi/asm/auxvec.h" 1
# 6 "./include/uapi/linux/auxvec.h" 2
# 6 "./include/linux/auxvec.h" 2
# 8 "./include/linux/mm_types.h" 2
# 1 "./include/linux/kref.h" 1
# 16 "./include/linux/kref.h"
# 1 "./include/linux/spinlock.h" 1
# 55 "./include/linux/spinlock.h"
# 1 "./include/linux/preempt.h" 1
# 11 "./include/linux/preempt.h"
# 1 "./include/linux/list.h" 1







# 1 "./include/linux/poison.h" 1
# 9 "./include/linux/list.h" 2
# 35 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void INIT_LIST_HEAD(struct list_head *list)
{
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_29(void) ; if (!((sizeof(list->next) == sizeof(char) || sizeof(list->next) == sizeof(short) || sizeof(list->next) == sizeof(int) || sizeof(list->next) == sizeof(long)) || sizeof(list->next) == sizeof(long long))) __compiletime_assert_29(); } while (0); do { *(volatile typeof(list->next) *)&(list->next) = (list); } while (0); } while (0);
list->prev = list;
}
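/* The do { ... __compiletime_assert_NN ... } while (0) blocks appearing
 * from here on are the expansions of WRITE_ONCE()/READ_ONCE(): a
 * compile-time size check followed by a volatile access. Unexpanded,
 * INIT_LIST_HEAD() is simply:
 *
 *   WRITE_ONCE(list->next, list);
 *   list->prev = list;
 */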


extern bool __list_add_valid(struct list_head *new,
struct list_head *prev,
struct list_head *next);
extern bool __list_del_entry_valid(struct list_head *entry);
# 65 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
if (!__list_add_valid(new, prev, next))
return;

next->prev = new;
new->next = next;
new->prev = prev;
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_30(void) ; if (!((sizeof(prev->next) == sizeof(char) || sizeof(prev->next) == sizeof(short) || sizeof(prev->next) == sizeof(int) || sizeof(prev->next) == sizeof(long)) || sizeof(prev->next) == sizeof(long long))) __compiletime_assert_30(); } while (0); do { *(volatile typeof(prev->next) *)&(prev->next) = (new); } while (0); } while (0);
}
# 86 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_add(struct list_head *new, struct list_head *head)
{
__list_add(new, head, head->next);
}
# 100 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_add_tail(struct list_head *new, struct list_head *head)
{
__list_add(new, head->prev, head);
}
# 112 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __list_del(struct list_head * prev, struct list_head * next)
{
next->prev = prev;
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_31(void) ; if (!((sizeof(prev->next) == sizeof(char) || sizeof(prev->next) == sizeof(short) || sizeof(prev->next) == sizeof(int) || sizeof(prev->next) == sizeof(long)) || sizeof(prev->next) == sizeof(long long))) __compiletime_assert_31(); } while (0); do { *(volatile typeof(prev->next) *)&(prev->next) = (next); } while (0); } while (0);
}
# 126 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __list_del_clearprev(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
entry->prev = ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __list_del_entry(struct list_head *entry)
{
if (!__list_del_entry_valid(entry))
return;

__list_del(entry->prev, entry->next);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_del(struct list_head *entry)
{
__list_del_entry(entry);
entry->next = ((void *) 0x100 + 0);
entry->prev = ((void *) 0x122 + 0);
}
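/* The 0x100 / 0x122 constants are LIST_POISON1 and LIST_POISON2 (with
 * POISON_POINTER_DELTA, i.e. CONFIG_ILLEGAL_POINTER_VALUE, being 0 here):
 * deleted entries are poisoned so a stale use faults recognizably rather
 * than silently following a live pointer. */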
# 160 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_replace(struct list_head *old,
struct list_head *new)
{
new->next = old->next;
new->next->prev = new;
new->prev = old->prev;
new->prev->next = new;
}
# 176 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_replace_init(struct list_head *old,
struct list_head *new)
{
list_replace(old, new);
INIT_LIST_HEAD(old);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_swap(struct list_head *entry1,
struct list_head *entry2)
{
struct list_head *pos = entry2->prev;

list_del(entry2);
list_replace(entry1, entry2);
if (pos == entry1)
pos = entry2;
list_add(entry1, pos);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_del_init(struct list_head *entry)
{
__list_del_entry(entry);
INIT_LIST_HEAD(entry);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_move(struct list_head *list, struct list_head *head)
{
__list_del_entry(list);
list_add(list, head);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_move_tail(struct list_head *list,
struct list_head *head)
{
__list_del_entry(list);
list_add_tail(list, head);
}
# 242 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_bulk_move_tail(struct list_head *head,
struct list_head *first,
struct list_head *last)
{
first->prev->next = last->next;
last->next->prev = first->prev;

head->prev->next = first;
first->prev = head->prev;

last->next = head;
head->prev = last;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int list_is_first(const struct list_head *list, const struct list_head *head)
{
return list->prev == head;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int list_is_last(const struct list_head *list, const struct list_head *head)
{
return list->next == head;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int list_is_head(const struct list_head *list, const struct list_head *head)
{
return list == head;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int list_empty(const struct list_head *head)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_32(void) ; if (!((sizeof(head->next) == sizeof(char) || sizeof(head->next) == sizeof(short) || sizeof(head->next) == sizeof(int) || sizeof(head->next) == sizeof(long)) || sizeof(head->next) == sizeof(long long))) __compiletime_assert_32(); } while (0); (*(const volatile typeof( _Generic((head->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (head->next))) *)&(head->next)); }) == head;
}
# 306 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_del_init_careful(struct list_head *entry)
{
__list_del_entry(entry);
entry->prev = entry;
do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_33(void) ; if (!((sizeof(*&entry->next) == sizeof(char) || sizeof(*&entry->next) == sizeof(short) || sizeof(*&entry->next) == sizeof(int) || sizeof(*&entry->next) == sizeof(long)))) __compiletime_assert_33(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_34(void) ; if (!((sizeof(*&entry->next) == sizeof(char) || sizeof(*&entry->next) == sizeof(short) || sizeof(*&entry->next) == sizeof(int) || sizeof(*&entry->next) == sizeof(long)) || sizeof(*&entry->next) == sizeof(long long))) __compiletime_assert_34(); } while (0); do { *(volatile typeof(*&entry->next) *)&(*&entry->next) = (entry); } while (0); } while (0); } while (0); } while (0);
}
# 326 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int list_empty_careful(const struct list_head *head)
{
struct list_head *next = ({ typeof(*&head->next) ___p1 = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_35(void) ; if (!((sizeof(*&head->next) == sizeof(char) || sizeof(*&head->next) == sizeof(short) || sizeof(*&head->next) == sizeof(int) || sizeof(*&head->next) == sizeof(long)) || sizeof(*&head->next) == sizeof(long long))) __compiletime_assert_35(); } while (0); (*(const volatile typeof( _Generic((*&head->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*&head->next))) *)&(*&head->next)); }); do { __attribute__((__noreturn__)) extern void __compiletime_assert_36(void) ; if (!((sizeof(*&head->next) == sizeof(char) || sizeof(*&head->next) == sizeof(short) || sizeof(*&head->next) == sizeof(int) || sizeof(*&head->next) == sizeof(long)))) __compiletime_assert_36(); } while (0); __asm__ __volatile__ ("fence " "r" "," "rw" : : : "memory"); ___p1; });
return list_is_head(next, head) && (next == head->prev);
}
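/* The inline-asm fences are the RISC-V barriers behind the _careful list
 * helpers: "fence rw,w" in list_del_init_careful() is the
 * smp_store_release(&entry->next, entry), and "fence r,rw" in
 * list_empty_careful() is the smp_load_acquire() of head->next. */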





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_rotate_left(struct list_head *head)
{
struct list_head *first;

if (!list_empty(head)) {
first = head->next;
list_move_tail(first, head);
}
}
# 353 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_rotate_to_front(struct list_head *list,
struct list_head *head)
{





list_move_tail(head, list);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int list_is_singular(const struct list_head *head)
{
return !list_empty(head) && (head->next == head->prev);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __list_cut_position(struct list_head *list,
struct list_head *head, struct list_head *entry)
{
struct list_head *new_first = entry->next;
list->next = head->next;
list->next->prev = list;
list->prev = entry;
entry->next = list;
head->next = new_first;
new_first->prev = head;
}
# 399 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_cut_position(struct list_head *list,
struct list_head *head, struct list_head *entry)
{
if (list_empty(head))
return;
if (list_is_singular(head) && !list_is_head(entry, head) && (entry != head->next))
return;
if (list_is_head(entry, head))
INIT_LIST_HEAD(list);
else
__list_cut_position(list, head, entry);
}
# 426 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_cut_before(struct list_head *list,
struct list_head *head,
struct list_head *entry)
{
if (head->next == entry) {
INIT_LIST_HEAD(list);
return;
}
list->next = head->next;
list->next->prev = list;
list->prev = entry->prev;
list->prev->next = list;
head->next = entry;
entry->prev = head;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __list_splice(const struct list_head *list,
struct list_head *prev,
struct list_head *next)
{
struct list_head *first = list->next;
struct list_head *last = list->prev;

first->prev = prev;
prev->next = first;

last->next = next;
next->prev = last;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_splice(const struct list_head *list,
struct list_head *head)
{
if (!list_empty(list))
__list_splice(list, head, head->next);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_splice_tail(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list))
__list_splice(list, head->prev, head);
}
# 487 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_splice_init(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list)) {
__list_splice(list, head, head->next);
INIT_LIST_HEAD(list);
}
}
# 504 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_splice_tail_init(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list)) {
__list_splice(list, head->prev, head);
INIT_LIST_HEAD(list);
}
}
# 802 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void INIT_HLIST_NODE(struct hlist_node *h)
{
h->next = ((void *)0);
h->pprev = ((void *)0);
}
# 816 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int hlist_unhashed(const struct hlist_node *h)
{
return !h->pprev;
}
# 829 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int hlist_unhashed_lockless(const struct hlist_node *h)
{
return !({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_37(void) ; if (!((sizeof(h->pprev) == sizeof(char) || sizeof(h->pprev) == sizeof(short) || sizeof(h->pprev) == sizeof(int) || sizeof(h->pprev) == sizeof(long)) || sizeof(h->pprev) == sizeof(long long))) __compiletime_assert_37(); } while (0); (*(const volatile typeof( _Generic((h->pprev), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (h->pprev))) *)&(h->pprev)); });
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int hlist_empty(const struct hlist_head *h)
{
return !({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_38(void) ; if (!((sizeof(h->first) == sizeof(char) || sizeof(h->first) == sizeof(short) || sizeof(h->first) == sizeof(int) || sizeof(h->first) == sizeof(long)) || sizeof(h->first) == sizeof(long long))) __compiletime_assert_38(); } while (0); (*(const volatile typeof( _Generic((h->first), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (h->first))) *)&(h->first)); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __hlist_del(struct hlist_node *n)
{
struct hlist_node *next = n->next;
struct hlist_node **pprev = n->pprev;

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_39(void) ; if (!((sizeof(*pprev) == sizeof(char) || sizeof(*pprev) == sizeof(short) || sizeof(*pprev) == sizeof(int) || sizeof(*pprev) == sizeof(long)) || sizeof(*pprev) == sizeof(long long))) __compiletime_assert_39(); } while (0); do { *(volatile typeof(*pprev) *)&(*pprev) = (next); } while (0); } while (0);
if (next)
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_40(void) ; if (!((sizeof(next->pprev) == sizeof(char) || sizeof(next->pprev) == sizeof(short) || sizeof(next->pprev) == sizeof(int) || sizeof(next->pprev) == sizeof(long)) || sizeof(next->pprev) == sizeof(long long))) __compiletime_assert_40(); } while (0); do { *(volatile typeof(next->pprev) *)&(next->pprev) = (pprev); } while (0); } while (0);
}
# 860 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_del(struct hlist_node *n)
{
__hlist_del(n);
n->next = ((void *) 0x100 + 0);
n->pprev = ((void *) 0x122 + 0);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_del_init(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
__hlist_del(n);
INIT_HLIST_NODE(n);
}
}
# 889 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_41(void) ; if (!((sizeof(n->next) == sizeof(char) || sizeof(n->next) == sizeof(short) || sizeof(n->next) == sizeof(int) || sizeof(n->next) == sizeof(long)) || sizeof(n->next) == sizeof(long long))) __compiletime_assert_41(); } while (0); do { *(volatile typeof(n->next) *)&(n->next) = (first); } while (0); } while (0);
if (first)
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_42(void) ; if (!((sizeof(first->pprev) == sizeof(char) || sizeof(first->pprev) == sizeof(short) || sizeof(first->pprev) == sizeof(int) || sizeof(first->pprev) == sizeof(long)) || sizeof(first->pprev) == sizeof(long long))) __compiletime_assert_42(); } while (0); do { *(volatile typeof(first->pprev) *)&(first->pprev) = (&n->next); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_43(void) ; if (!((sizeof(h->first) == sizeof(char) || sizeof(h->first) == sizeof(short) || sizeof(h->first) == sizeof(int) || sizeof(h->first) == sizeof(long)) || sizeof(h->first) == sizeof(long long))) __compiletime_assert_43(); } while (0); do { *(volatile typeof(h->first) *)&(h->first) = (n); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_44(void) ; if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_44(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (&h->first); } while (0); } while (0);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_add_before(struct hlist_node *n,
struct hlist_node *next)
{
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_45(void) ; if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_45(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (next->pprev); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_46(void) ; if (!((sizeof(n->next) == sizeof(char) || sizeof(n->next) == sizeof(short) || sizeof(n->next) == sizeof(int) || sizeof(n->next) == sizeof(long)) || sizeof(n->next) == sizeof(long long))) __compiletime_assert_46(); } while (0); do { *(volatile typeof(n->next) *)&(n->next) = (next); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_47(void) ; if (!((sizeof(next->pprev) == sizeof(char) || sizeof(next->pprev) == sizeof(short) || sizeof(next->pprev) == sizeof(int) || sizeof(next->pprev) == sizeof(long)) || sizeof(next->pprev) == sizeof(long long))) __compiletime_assert_47(); } while (0); do { *(volatile typeof(next->pprev) *)&(next->pprev) = (&n->next); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_48(void) ; if (!((sizeof(*(n->pprev)) == sizeof(char) || sizeof(*(n->pprev)) == sizeof(short) || sizeof(*(n->pprev)) == sizeof(int) || sizeof(*(n->pprev)) == sizeof(long)) || sizeof(*(n->pprev)) == sizeof(long long))) __compiletime_assert_48(); } while (0); do { *(volatile typeof(*(n->pprev)) *)&(*(n->pprev)) = (n); } while (0); } while (0);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_add_behind(struct hlist_node *n,
struct hlist_node *prev)
{
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_49(void) ; if (!((sizeof(n->next) == sizeof(char) || sizeof(n->next) == sizeof(short) || sizeof(n->next) == sizeof(int) || sizeof(n->next) == sizeof(long)) || sizeof(n->next) == sizeof(long long))) __compiletime_assert_49(); } while (0); do { *(volatile typeof(n->next) *)&(n->next) = (prev->next); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_50(void) ; if (!((sizeof(prev->next) == sizeof(char) || sizeof(prev->next) == sizeof(short) || sizeof(prev->next) == sizeof(int) || sizeof(prev->next) == sizeof(long)) || sizeof(prev->next) == sizeof(long long))) __compiletime_assert_50(); } while (0); do { *(volatile typeof(prev->next) *)&(prev->next) = (n); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_51(void) ; if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_51(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (&prev->next); } while (0); } while (0);

if (n->next)
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_52(void) ; if (!((sizeof(n->next->pprev) == sizeof(char) || sizeof(n->next->pprev) == sizeof(short) || sizeof(n->next->pprev) == sizeof(int) || sizeof(n->next->pprev) == sizeof(long)) || sizeof(n->next->pprev) == sizeof(long long))) __compiletime_assert_52(); } while (0); do { *(volatile typeof(n->next->pprev) *)&(n->next->pprev) = (&n->next); } while (0); } while (0);
}
# 937 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_add_fake(struct hlist_node *n)
{
n->pprev = &n->next;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool hlist_fake(struct hlist_node *h)
{
return h->pprev == &h->next;
}
# 959 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
{
return !n->next && n->pprev == &h->first;
}
# 973 "./include/linux/list.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_move_list(struct hlist_head *old,
struct hlist_head *new)
{
new->first = old->first;
if (new->first)
new->first->pprev = &new->first;
old->first = ((void *)0);
}
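/* hlist nodes keep a pointer-to-pointer (pprev) to the previous link
 * instead of a node pointer, which is why __hlist_del() can store through
 * *pprev directly and why hlist_unhashed() is just !h->pprev. The
 * READ_ONCE/WRITE_ONCE expansions mirror the list.h ones above. */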
# 12 "./include/linux/preempt.h" 2
# 78 "./include/linux/preempt.h"
# 1 "./arch/riscv/include/generated/asm/preempt.h" 1
# 1 "./include/asm-generic/preempt.h" 1








static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int preempt_count(void)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_53(void) ; if (!((sizeof(((struct thread_info *)get_current())->preempt_count) == sizeof(char) || sizeof(((struct thread_info *)get_current())->preempt_count) == sizeof(short) || sizeof(((struct thread_info *)get_current())->preempt_count) == sizeof(int) || sizeof(((struct thread_info *)get_current())->preempt_count) == sizeof(long)) || sizeof(((struct thread_info *)get_current())->preempt_count) == sizeof(long long))) __compiletime_assert_53(); } while (0); (*(const volatile typeof( _Generic((((struct thread_info *)get_current())->preempt_count), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (((struct thread_info *)get_current())->preempt_count))) *)&(((struct thread_info *)get_current())->preempt_count)); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) volatile int *preempt_count_ptr(void)
{
return &((struct thread_info *)get_current())->preempt_count;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void preempt_count_set(int pc)
{
*preempt_count_ptr() = pc;
}
# 35 "./include/asm-generic/preempt.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void set_preempt_need_resched(void)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void clear_preempt_need_resched(void)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool test_preempt_need_resched(void)
{
return false;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __preempt_count_add(int val)
{
*preempt_count_ptr() += val;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __preempt_count_sub(int val)
{
*preempt_count_ptr() -= val;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool __preempt_count_dec_and_test(void)
{





return !--*preempt_count_ptr() && test_ti_thread_flag(((struct thread_info *)get_current()), 3);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool should_resched(int preempt_offset)
{
return __builtin_expect(!!(preempt_count() == preempt_offset && test_ti_thread_flag(((struct thread_info *)get_current()), 3)), 0);

}
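/* The magic 3 in test_ti_thread_flag(..., 3) is TIF_NEED_RESCHED on riscv,
 * and (struct thread_info *)get_current() is current_thread_info() under
 * THREAD_INFO_IN_TASK, where thread_info is the first member of
 * task_struct. */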
# 2 "./arch/riscv/include/generated/asm/preempt.h" 2
# 79 "./include/linux/preempt.h" 2
# 89 "./include/linux/preempt.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned char interrupt_context_level(void)
{
unsigned long pc = preempt_count();
unsigned char level = 0;

level += !!(pc & ((((1UL << (4))-1) << (((0 + 8) + 8) + 4))));
level += !!(pc & ((((1UL << (4))-1) << (((0 + 8) + 8) + 4)) | (((1UL << (4))-1) << ((0 + 8) + 8))));
level += !!(pc & ((((1UL << (4))-1) << (((0 + 8) + 8) + 4)) | (((1UL << (4))-1) << ((0 + 8) + 8)) | (1UL << (0 + 8))));

return level;
}
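/* Decoded, interrupt_context_level() tests the standard preempt_count
 * layout: bits 0-7 preemption, 8-15 softirq, 16-19 hardirq, 20-23 NMI.
 * The three masks are NMI_MASK, NMI_MASK | HARDIRQ_MASK, and
 * NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET, so the result counts how
 * deeply nested in interrupt context the caller is (0-3). */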
# 309 "./include/linux/preempt.h"
struct preempt_notifier;
# 325 "./include/linux/preempt.h"
struct preempt_ops {
void (*sched_in)(struct preempt_notifier *notifier, int cpu);
void (*sched_out)(struct preempt_notifier *notifier,
struct task_struct *next);
};
# 338 "./include/linux/preempt.h"
struct preempt_notifier {
struct hlist_node link;
struct preempt_ops *ops;
};

void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void preempt_notifier_init(struct preempt_notifier *notifier,
struct preempt_ops *ops)
{
INIT_HLIST_NODE(&notifier->link);
notifier->ops = ops;
}
# 414 "./include/linux/preempt.h"
extern void migrate_disable(void);
extern void migrate_enable(void);
# 56 "./include/linux/spinlock.h" 2





# 1 "./include/linux/bottom_half.h" 1








extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
# 18 "./include/linux/bottom_half.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void local_bh_disable(void)
{
__local_bh_disable_ip(({ __label__ __here; __here: (unsigned long)&&__here; }), (2 * (1UL << (0 + 8))));
}

extern void _local_bh_enable(void);
extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void local_bh_enable_ip(unsigned long ip)
{
__local_bh_enable_ip(ip, (2 * (1UL << (0 + 8))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void local_bh_enable(void)
{
__local_bh_enable_ip(({ __label__ __here; __here: (unsigned long)&&__here; }), (2 * (1UL << (0 + 8))));
}
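/* ({ __label__ __here; __here: (unsigned long)&&__here; }) is _THIS_IP_
 * (the caller's instruction pointer, recorded for lockdep/tracing), and
 * (2 * (1UL << (0 + 8))) is SOFTIRQ_DISABLE_OFFSET (0x200): bottom halves
 * disable by two softirq counts so disable sections can be told apart
 * from actual softirq processing. */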




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool local_bh_blocked(void) { return false; }
# 62 "./include/linux/spinlock.h" 2
# 1 "./include/linux/lockdep.h" 1
# 14 "./include/linux/lockdep.h"
# 1 "./include/linux/smp.h" 1
# 15 "./include/linux/smp.h"
# 1 "./include/linux/smp_types.h" 1




# 1 "./include/linux/llist.h" 1
# 56 "./include/linux/llist.h"
struct llist_head {
struct llist_node *first;
};

struct llist_node {
struct llist_node *next;
};
# 71 "./include/linux/llist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void init_llist_head(struct llist_head *list)
{
list->first = ((void *)0);
}
# 189 "./include/linux/llist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool llist_empty(const struct llist_head *head)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_54(void) ; if (!((sizeof(head->first) == sizeof(char) || sizeof(head->first) == sizeof(short) || sizeof(head->first) == sizeof(int) || sizeof(head->first) == sizeof(long)) || sizeof(head->first) == sizeof(long long))) __compiletime_assert_54(); } while (0); (*(const volatile typeof( _Generic((head->first), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (head->first))) *)&(head->first)); }) == ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct llist_node *llist_next(struct llist_node *node)
{
return node->next;
}

extern bool llist_add_batch(struct llist_node *new_first,
struct llist_node *new_last,
struct llist_head *head);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __llist_add_batch(struct llist_node *new_first,
struct llist_node *new_last,
struct llist_head *head)
{
new_last->next = head->first;
head->first = new_first;
return new_last->next == ((void *)0);
}
# 219 "./include/linux/llist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool llist_add(struct llist_node *new, struct llist_head *head)
{
return llist_add_batch(new, new, head);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __llist_add(struct llist_node *new, struct llist_head *head)
{
return __llist_add_batch(new, new, head);
}
# 237 "./include/linux/llist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct llist_node *llist_del_all(struct llist_head *head)
{
return ({ typeof(&head->first) __ai_ptr = (&head->first); do { } while (0); instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); ({ __typeof__(*(__ai_ptr)) _x_ = (((void *)0)); (__typeof__(*(__ai_ptr))) ({ __typeof__((__ai_ptr)) __ptr = ((__ai_ptr)); __typeof__(_x_) __new = (_x_); __typeof__(*((__ai_ptr))) __ret; switch (sizeof(*(__ai_ptr))) { case 4: __asm__ __volatile__ ( " amoswap.w.aqrl %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( " amoswap.d.aqrl %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_55(void) ; if (!(!(1))) __compiletime_assert_55(); } while (0); } __ret; }); }); });
}
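/* llist_del_all() is xchg(&head->first, NULL): on RISC-V that lowers to a
 * single amoswap.d.aqrl (amoswap.w for 4-byte values), which is why the
 * whole function body is one inline-asm expansion. The same pattern
 * reappears below in __debug_locks_off(). */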

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct llist_node *__llist_del_all(struct llist_head *head)
{
struct llist_node *first = head->first;

head->first = ((void *)0);
return first;
}

extern struct llist_node *llist_del_first(struct llist_head *head);

struct llist_node *llist_reverse_order(struct llist_node *head);
# 6 "./include/linux/smp_types.h" 2

enum {
CSD_FLAG_LOCK = 0x01,

IRQ_WORK_PENDING = 0x01,
IRQ_WORK_BUSY = 0x02,
IRQ_WORK_LAZY = 0x04,
IRQ_WORK_HARD_IRQ = 0x08,

IRQ_WORK_CLAIMED = (IRQ_WORK_PENDING | IRQ_WORK_BUSY),

CSD_TYPE_ASYNC = 0x00,
CSD_TYPE_SYNC = 0x10,
CSD_TYPE_IRQ_WORK = 0x20,
CSD_TYPE_TTWU = 0x30,

CSD_FLAG_TYPE_MASK = 0xF0,
};
# 58 "./include/linux/smp_types.h"
struct __call_single_node {
struct llist_node llist;
union {
unsigned int u_flags;
atomic_t a_flags;
};

u16 src, dst;

};
# 16 "./include/linux/smp.h" 2

typedef void (*smp_call_func_t)(void *info);
typedef bool (*smp_cond_func_t)(int cpu, void *info);




struct __call_single_data {
struct __call_single_node node;
smp_call_func_t func;
void *info;
};





typedef struct __call_single_data call_single_data_t
__attribute__((__aligned__(sizeof(struct __call_single_data))));
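/* call_single_data_t is aligned to its own size (32 bytes here) so a
 * single instance never straddles a cache-line boundary; remote CPUs read
 * these structures, and a split across two lines would cost two misses. */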
# 45 "./include/linux/smp.h"
extern void __smp_call_single_queue(int cpu, struct llist_node *node);


extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
int wait);

void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
void *info, bool wait, const struct cpumask *mask);

int smp_call_function_single_async(int cpu, struct __call_single_data *csd);





void panic_smp_self_stop(void);
void nmi_panic_self_stop(struct pt_regs *regs);
void crash_smp_send_stop(void);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
on_each_cpu_cond_mask(((void *)0), func, info, wait, ((const struct cpumask *)&__cpu_online_mask));
}
# 90 "./include/linux/smp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void on_each_cpu_mask(const struct cpumask *mask,
smp_call_func_t func, void *info, bool wait)
{
on_each_cpu_cond_mask(((void *)0), func, info, wait, mask);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void on_each_cpu_cond(smp_cond_func_t cond_func,
smp_call_func_t func, void *info, bool wait)
{
on_each_cpu_cond_mask(cond_func, func, info, wait, ((const struct cpumask *)&__cpu_online_mask));
}







# 1 "./arch/riscv/include/asm/smp.h" 1
# 10 "./arch/riscv/include/asm/smp.h"
# 1 "./include/linux/irqreturn.h" 1
# 11 "./include/linux/irqreturn.h"
enum irqreturn {
IRQ_NONE = (0 << 0),
IRQ_HANDLED = (1 << 0),
IRQ_WAKE_THREAD = (1 << 1),
};

typedef enum irqreturn irqreturn_t;
# 11 "./arch/riscv/include/asm/smp.h" 2




struct seq_file;
extern unsigned long boot_cpu_hartid;

struct riscv_ipi_ops {
void (*ipi_inject)(const struct cpumask *target);
void (*ipi_clear)(void);
};





extern unsigned long __cpuid_to_hartid_map[32];



void show_ipi_stats(struct seq_file *p, int prec);


void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) setup_smp(void);


void handle_IPI(struct pt_regs *regs);


void arch_send_call_function_ipi_mask(struct cpumask *mask);


void arch_send_call_function_single_ipi(int cpu);

int riscv_hartid_to_cpuid(int hartid);


void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops);


void riscv_clear_ipi(void);


void smp_callin(void);
# 63 "./arch/riscv/include/asm/smp.h"
int __cpu_disable(void);
void __cpu_die(unsigned int cpu);
# 96 "./arch/riscv/include/asm/smp.h"
bool cpu_has_hotplug(unsigned int cpu);
# 114 "./include/linux/smp.h" 2
# 123 "./include/linux/smp.h"
extern void smp_send_stop(void);




extern void smp_send_reschedule(int cpu);





extern void smp_prepare_cpus(unsigned int max_cpus);




extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);




extern void smp_cpus_done(unsigned int max_cpus);




void smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
smp_call_func_t func, void *info, bool wait);

int smp_call_function_any(const struct cpumask *mask,
smp_call_func_t func, void *info, int wait);

void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);




void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) call_function_init(void);
void generic_smp_call_function_single_interrupt(void);







void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) setup_nr_cpu_ids(void);
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) smp_init(void);

extern int __boot_cpu_id;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int get_boot_cpu_id(void)
{
return __boot_cpu_id;
}
# 274 "./include/linux/smp.h"
extern void arch_disable_smp_support(void);

extern void arch_thaw_secondary_cpus_begin(void);
extern void arch_thaw_secondary_cpus_end(void);

void smp_setup_processor_id(void);

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
bool phys);


int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);
# 15 "./include/linux/lockdep.h" 2
# 1 "./arch/riscv/include/generated/asm/percpu.h" 1
# 16 "./include/linux/lockdep.h" 2

struct task_struct;


extern int prove_locking;
extern int lock_stat;






# 1 "./include/linux/debug_locks.h" 1







struct task_struct;

extern int debug_locks ;
extern int debug_locks_silent ;


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int __debug_locks_off(void)
{
return ({ typeof(&debug_locks) __ai_ptr = (&debug_locks); do { } while (0); instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); ({ __typeof__(*(__ai_ptr)) _x_ = (0); (__typeof__(*(__ai_ptr))) ({ __typeof__((__ai_ptr)) __ptr = ((__ai_ptr)); __typeof__(_x_) __new = (_x_); __typeof__(*((__ai_ptr))) __ret; switch (sizeof(*(__ai_ptr))) { case 4: __asm__ __volatile__ ( " amoswap.w.aqrl %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( " amoswap.d.aqrl %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_56(void) ; if (!(!(1))) __compiletime_assert_56(); } while (0); } __ret; }); }); });
}




extern int debug_locks_off(void);
# 51 "./include/linux/debug_locks.h"
extern void debug_show_all_locks(void);
extern void debug_show_held_locks(struct task_struct *task);
extern void debug_check_no_locks_freed(const void *from, unsigned long len);
extern void debug_check_no_locks_held(void);
# 28 "./include/linux/lockdep.h" 2
# 1 "./include/linux/stacktrace.h" 1





# 1 "./arch/riscv/include/generated/uapi/asm/errno.h" 1
# 7 "./include/linux/stacktrace.h" 2

struct task_struct;
struct pt_regs;
# 21 "./include/linux/stacktrace.h"
typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr);
# 38 "./include/linux/stacktrace.h"
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs);
# 58 "./include/linux/stacktrace.h"
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task);

void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
const struct pt_regs *regs);



void stack_trace_print(const unsigned long *trace, unsigned int nr_entries,
int spaces);
int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,
unsigned int nr_entries, int spaces);
unsigned int stack_trace_save(unsigned long *store, unsigned int size,
unsigned int skipnr);
unsigned int stack_trace_save_tsk(struct task_struct *task,
unsigned long *store, unsigned int size,
unsigned int skipnr);
unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
unsigned int size, unsigned int skipnr);
unsigned int stack_trace_save_user(unsigned long *store, unsigned int size);
unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries);
# 103 "./include/linux/stacktrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int stack_trace_save_tsk_reliable(struct task_struct *tsk,
unsigned long *store,
unsigned int size)
{
return -38;
}
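/* -38 is -ENOSYS: with CONFIG_HAVE_RELIABLE_STACKTRACE unset,
 * stack_trace_save_tsk_reliable() is a stub reporting "not implemented". */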
# 29 "./include/linux/lockdep.h" 2

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void lockdep_copy_map(struct lockdep_map *to,
struct lockdep_map *from)
{
int i;

*to = *from;
# 44 "./include/linux/lockdep.h"
for (i = 0; i < 2; i++)
to->class_cache[i] = ((void *)0);
}





struct lock_list {
struct list_head entry;
struct lock_class *class;
struct lock_class *links_to;
const struct lock_trace *trace;
u16 distance;

u8 dep;

u8 only_xr;





struct lock_list *parent;
};
# 79 "./include/linux/lockdep.h"
struct lock_chain {

unsigned int irq_context : 2,
depth : 6,
base : 24;

struct hlist_node entry;
u64 chain_key;
};





struct held_lock {
# 108 "./include/linux/lockdep.h"
u64 prev_chain_key;
unsigned long acquire_ip;
struct lockdep_map *instance;
struct lockdep_map *nest_lock;
# 121 "./include/linux/lockdep.h"
unsigned int class_idx:13;
# 135 "./include/linux/lockdep.h"
unsigned int irq_context:2;
unsigned int trylock:1;

unsigned int read:2;
unsigned int check:1;
unsigned int hardirqs_off:1;
unsigned int references:12;
unsigned int pin_count;
};




extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);
# 179 "./include/linux/lockdep.h"
extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);







extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass, u8 inner)
{
lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void lockdep_init_map(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass)
{
lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}
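/*
 * The three wrappers above funnel into lockdep_init_map_type(),
 * progressively defaulting the unspecified arguments: the lock type
 * defaults to LD_LOCK_NORMAL, and any wait type the caller does not
 * supply defaults to LD_WAIT_INV (invalid/unknown).
 */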
# 245 "./include/linux/lockdep.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int lockdep_match_key(struct lockdep_map *lock,
struct lock_class_key *key)
{
return lock->key == key;
}
# 265 "./include/linux/lockdep.h"
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check,
struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);
# 279 "./include/linux/lockdep.h"
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int lock_is_held(const struct lockdep_map *lock)
{
return lock_is_held_type(lock, -1);
}




extern void lock_set_class(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, unsigned int subclass,
unsigned long ip);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void lock_set_subclass(struct lockdep_map *lock,
unsigned int subclass, unsigned long ip)
{
lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);



extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
# 428 "./include/linux/lockdep.h"
enum xhlock_context_t {
XHLOCK_HARD,
XHLOCK_SOFT,
XHLOCK_CTX_NR,
};
# 442 "./include/linux/lockdep.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void lockdep_invariant_state(bool force) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void lockdep_free_task(struct task_struct *task) {}
# 487 "./include/linux/lockdep.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void print_irqtrace_events(struct task_struct *curr)
{
}
# 500 "./include/linux/lockdep.h"
extern bool read_lock_is_recursive(void);
# 653 "./include/linux/lockdep.h"
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
# 63 "./include/linux/spinlock.h" 2

# 1 "./arch/riscv/include/asm/mmiowb.h" 1
# 13 "./arch/riscv/include/asm/mmiowb.h"
# 1 "./include/asm-generic/mmiowb.h" 1
# 23 "./include/asm-generic/mmiowb.h"
# 1 "./include/asm-generic/mmiowb_types.h" 1






struct mmiowb_state {
u16 nesting_count;
u16 mmiowb_pending;
};
# 24 "./include/asm-generic/mmiowb.h" 2


# 1 "./arch/riscv/include/generated/asm/percpu.h" 1
# 27 "./include/asm-generic/mmiowb.h" 2


extern __attribute__((section(".data..percpu" ""))) __typeof__(struct mmiowb_state) __mmiowb_state;





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mmiowb_set_pending(void)
{
struct mmiowb_state *ms = ({ do { const void *__vpp_verify = (typeof((&__mmiowb_state) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&__mmiowb_state)) *)(&__mmiowb_state)); (typeof((typeof(*(&__mmiowb_state)) *)(&__mmiowb_state))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); });

if (__builtin_expect(!!(ms->nesting_count), 1))
ms->mmiowb_pending = ms->nesting_count;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mmiowb_spin_lock(void)
{
struct mmiowb_state *ms = ({ do { const void *__vpp_verify = (typeof((&__mmiowb_state) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&__mmiowb_state)) *)(&__mmiowb_state)); (typeof((typeof(*(&__mmiowb_state)) *)(&__mmiowb_state))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); });
ms->nesting_count++;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mmiowb_spin_unlock(void)
{
struct mmiowb_state *ms = ({ do { const void *__vpp_verify = (typeof((&__mmiowb_state) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&__mmiowb_state)) *)(&__mmiowb_state)); (typeof((typeof(*(&__mmiowb_state)) *)(&__mmiowb_state))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); });

if (__builtin_expect(!!(ms->mmiowb_pending), 0)) {
ms->mmiowb_pending = 0;
__asm__ __volatile__ ("fence o,w" : : : "memory");;
}

ms->nesting_count--;
}
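/*
 * mmiowb protocol, as expanded above: the large statement expressions
 * are this_cpu_ptr(&__mmiowb_state).  mmiowb_set_pending() is called
 * after MMIO writes issued while a spinlock is held; on unlock, a set
 * pending flag triggers "fence o,w" so that device (I/O) writes are
 * ordered before the store that releases the lock.  nesting_count
 * keeps the flag correct across nested spinlocks.
 */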
# 14 "./arch/riscv/include/asm/mmiowb.h" 2
# 65 "./include/linux/spinlock.h" 2
# 87 "./include/linux/spinlock.h"
# 1 "./include/linux/spinlock_types.h" 1
# 17 "./include/linux/spinlock_types.h"
typedef struct spinlock {
union {
struct raw_spinlock rlock;



struct {
u8 __padding[(__builtin_offsetof(struct raw_spinlock, dep_map))];
struct lockdep_map dep_map;
};

};
} spinlock_t;
# 74 "./include/linux/spinlock_types.h"
# 1 "./include/linux/rwlock_types.h" 1
# 25 "./include/linux/rwlock_types.h"
typedef struct {
arch_rwlock_t raw_lock;

unsigned int magic, owner_cpu;
void *owner;


struct lockdep_map dep_map;

} rwlock_t;
# 75 "./include/linux/spinlock_types.h" 2
# 88 "./include/linux/spinlock.h" 2





# 1 "./arch/riscv/include/asm/spinlock.h" 1
# 22 "./arch/riscv/include/asm/spinlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_spin_unlock(arch_spinlock_t *lock)
{
do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_57(void) ; if (!((sizeof(*&lock->lock) == sizeof(char) || sizeof(*&lock->lock) == sizeof(short) || sizeof(*&lock->lock) == sizeof(int) || sizeof(*&lock->lock) == sizeof(long)))) __compiletime_assert_57(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_58(void) ; if (!((sizeof(*&lock->lock) == sizeof(char) || sizeof(*&lock->lock) == sizeof(short) || sizeof(*&lock->lock) == sizeof(int) || sizeof(*&lock->lock) == sizeof(long)) || sizeof(*&lock->lock) == sizeof(long long))) __compiletime_assert_58(); } while (0); do { *(volatile typeof(*&lock->lock) *)&(*&lock->lock) = (0); } while (0); } while (0); } while (0); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int arch_spin_trylock(arch_spinlock_t *lock)
{
int tmp = 1, busy;

__asm__ __volatile__ (
" amoswap.w %0, %2, %1\n"
"\tfence r , rw\n"
: "=r" (busy), "+A" (lock->lock)
: "r" (tmp)
: "memory");

return !busy;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_spin_lock(arch_spinlock_t *lock)
{
while (1) {
if ((({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_59(void) ; if (!((sizeof((lock)->lock) == sizeof(char) || sizeof((lock)->lock) == sizeof(short) || sizeof((lock)->lock) == sizeof(int) || sizeof((lock)->lock) == sizeof(long)) || sizeof((lock)->lock) == sizeof(long long))) __compiletime_assert_59(); } while (0); (*(const volatile typeof( _Generic(((lock)->lock), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((lock)->lock))) *)&((lock)->lock)); }) != 0))
continue;

if (arch_spin_trylock(lock))
break;
}
}
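/*
 * Test-and-set spinlock: arch_spin_trylock() swaps 1 into the lock
 * word with amoswap.w and treats a non-zero old value as "busy";
 * "fence r , rw" is the acquire barrier on success.  arch_spin_lock()
 * is a test-and-test-and-set loop: it spins on a plain load of the
 * lock word (the READ_ONCE() expansion above) and only retries the
 * atomic swap once the lock looks free, reducing cache-line traffic.
 */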



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_read_lock(arch_rwlock_t *lock)
{
int tmp;

__asm__ __volatile__(
"1: lr.w %1, %0\n"
" bltz %1, 1b\n"
" addi %1, %1, 1\n"
" sc.w %1, %1, %0\n"
" bnez %1, 1b\n"
"\tfence r , rw\n"
: "+A" (lock->lock), "=&r" (tmp)
:: "memory");
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_write_lock(arch_rwlock_t *lock)
{
int tmp;

__asm__ __volatile__(
"1: lr.w %1, %0\n"
" bnez %1, 1b\n"
" li %1, -1\n"
" sc.w %1, %1, %0\n"
" bnez %1, 1b\n"
"\tfence r , rw\n"
: "+A" (lock->lock), "=&r" (tmp)
:: "memory");
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int arch_read_trylock(arch_rwlock_t *lock)
{
int busy;

__asm__ __volatile__(
"1: lr.w %1, %0\n"
" bltz %1, 1f\n"
" addi %1, %1, 1\n"
" sc.w %1, %1, %0\n"
" bnez %1, 1b\n"
"\tfence r , rw\n"
"1:\n"
: "+A" (lock->lock), "=&r" (busy)
:: "memory");

return !busy;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int arch_write_trylock(arch_rwlock_t *lock)
{
int busy;

__asm__ __volatile__(
"1: lr.w %1, %0\n"
" bnez %1, 1f\n"
" li %1, -1\n"
" sc.w %1, %1, %0\n"
" bnez %1, 1b\n"
"\tfence r , rw\n"
"1:\n"
: "+A" (lock->lock), "=&r" (busy)
:: "memory");

return !busy;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_read_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
"\tfence rw, w\n"
" amoadd.w x0, %1, %0\n"
: "+A" (lock->lock)
: "r" (-1)
: "memory");
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_write_unlock(arch_rwlock_t *lock)
{
do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_60(void) ; if (!((sizeof(*&lock->lock) == sizeof(char) || sizeof(*&lock->lock) == sizeof(short) || sizeof(*&lock->lock) == sizeof(int) || sizeof(*&lock->lock) == sizeof(long)))) __compiletime_assert_60(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_61(void) ; if (!((sizeof(*&lock->lock) == sizeof(char) || sizeof(*&lock->lock) == sizeof(short) || sizeof(*&lock->lock) == sizeof(int) || sizeof(*&lock->lock) == sizeof(long)) || sizeof(*&lock->lock) == sizeof(long long))) __compiletime_assert_61(); } while (0); do { *(volatile typeof(*&lock->lock) *)&(*&lock->lock) = (0); } while (0); } while (0); } while (0); } while (0);
}
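/*
 * The rwlock word encodes the holders directly: a non-negative value
 * counts active readers, -1 means a writer holds the lock.  Readers
 * spin in an lr.w/sc.w loop that bails (bltz) while a writer is
 * present and otherwise increments the count; the writer path stores
 * -1 only when the word is zero.  "fence r , rw" and "fence rw, w"
 * are the acquire and release barriers, respectively.
 */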
# 94 "./include/linux/spinlock.h" 2





extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
struct lock_class_key *key, short inner);
# 178 "./include/linux/spinlock.h"
extern void do_raw_spin_lock(raw_spinlock_t *lock) ;
extern int do_raw_spin_trylock(raw_spinlock_t *lock);
extern void do_raw_spin_unlock(raw_spinlock_t *lock) ;
# 303 "./include/linux/spinlock.h"
# 1 "./include/linux/rwlock.h" 1
# 18 "./include/linux/rwlock.h"
extern void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key);
# 32 "./include/linux/rwlock.h"
extern void do_raw_read_lock(rwlock_t *lock) ;
extern int do_raw_read_trylock(rwlock_t *lock);
extern void do_raw_read_unlock(rwlock_t *lock) ;
extern void do_raw_write_lock(rwlock_t *lock) ;
extern int do_raw_write_trylock(rwlock_t *lock);
extern void do_raw_write_unlock(rwlock_t *lock) ;
# 304 "./include/linux/spinlock.h" 2






# 1 "./include/linux/spinlock_api_smp.h" 1
# 18 "./include/linux/spinlock_api_smp.h"
int in_lock_functions(unsigned long addr);



void __attribute__((__section__(".spinlock.text"))) _raw_spin_lock(raw_spinlock_t *lock) ;
void __attribute__((__section__(".spinlock.text"))) _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
;
void __attribute__((__section__(".spinlock.text")))
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
;
void __attribute__((__section__(".spinlock.text"))) _raw_spin_lock_bh(raw_spinlock_t *lock) ;
void __attribute__((__section__(".spinlock.text"))) _raw_spin_lock_irq(raw_spinlock_t *lock)
;

unsigned long __attribute__((__section__(".spinlock.text"))) _raw_spin_lock_irqsave(raw_spinlock_t *lock)
;
unsigned long __attribute__((__section__(".spinlock.text")))
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
;
int __attribute__((__section__(".spinlock.text"))) _raw_spin_trylock(raw_spinlock_t *lock);
int __attribute__((__section__(".spinlock.text"))) _raw_spin_trylock_bh(raw_spinlock_t *lock);
void __attribute__((__section__(".spinlock.text"))) _raw_spin_unlock(raw_spinlock_t *lock) ;
void __attribute__((__section__(".spinlock.text"))) _raw_spin_unlock_bh(raw_spinlock_t *lock) ;
void __attribute__((__section__(".spinlock.text"))) _raw_spin_unlock_irq(raw_spinlock_t *lock) ;
void __attribute__((__section__(".spinlock.text")))
_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
;
# 86 "./include/linux/spinlock_api_smp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __raw_spin_trylock(raw_spinlock_t *lock)
{
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
if (do_raw_spin_trylock(lock)) {
lock_acquire(&lock->dep_map, 0, 1, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
return 1;
}
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
return 0;
}
# 104 "./include/linux/spinlock_api_smp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
unsigned long flags;

do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); if (!({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) trace_hardirqs_off(); } while (0);
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
lock_acquire(&lock->dep_map, 0, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
do_raw_spin_lock(lock);
return flags;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
do { bool was_disabled = (arch_irqs_disabled()); arch_local_irq_disable(); if (!was_disabled) trace_hardirqs_off(); } while (0);
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
lock_acquire(&lock->dep_map, 0, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
do_raw_spin_lock(lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
__local_bh_disable_ip((unsigned long)__builtin_return_address(0), ((2 * (1UL << (0 + 8))) + (1UL << 0)));
lock_acquire(&lock->dep_map, 0, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
do_raw_spin_lock(lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_spin_lock(raw_spinlock_t *lock)
{
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
lock_acquire(&lock->dep_map, 0, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
do_raw_spin_lock(lock);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_spin_unlock(raw_spinlock_t *lock)
{
lock_release(&lock->dep_map, (unsigned long)__builtin_return_address(0));
do_raw_spin_unlock(lock);
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
unsigned long flags)
{
lock_release(&lock->dep_map, (unsigned long)__builtin_return_address(0));
do_raw_spin_unlock(lock);
do { if (!({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(flags); } while (0); } while (0);
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
lock_release(&lock->dep_map, (unsigned long)__builtin_return_address(0));
do_raw_spin_unlock(lock);
do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0);
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
lock_release(&lock->dep_map, (unsigned long)__builtin_return_address(0));
do_raw_spin_unlock(lock);
__local_bh_enable_ip((unsigned long)__builtin_return_address(0), ((2 * (1UL << (0 + 8))) + (1UL << 0)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
__local_bh_disable_ip((unsigned long)__builtin_return_address(0), ((2 * (1UL << (0 + 8))) + (1UL << 0)));
if (do_raw_spin_trylock(lock)) {
lock_acquire(&lock->dep_map, 0, 1, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
return 1;
}
__local_bh_enable_ip((unsigned long)__builtin_return_address(0), ((2 * (1UL << (0 + 8))) + (1UL << 0)));
return 0;
}
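/*
 * Pattern shared by all the __raw_* lock helpers above: disable
 * preemption, interrupts or bottom halves as required (the opaque
 * do { __preempt_count_add(1); ... } while (0) blocks are the
 * expansions of preempt_disable() and local_irq_save()), inform
 * lockdep via lock_acquire()/lock_release(), then do the actual
 * locking with do_raw_spin_*().  A typical caller, sketched:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&lock, flags);
 *	... critical section ...
 *	spin_unlock_irqrestore(&lock, flags);
 */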




# 1 "./include/linux/rwlock_api_smp.h" 1
# 18 "./include/linux/rwlock_api_smp.h"
void __attribute__((__section__(".spinlock.text"))) _raw_read_lock(rwlock_t *lock) ;
void __attribute__((__section__(".spinlock.text"))) _raw_write_lock(rwlock_t *lock) ;
void __attribute__((__section__(".spinlock.text"))) _raw_write_lock_nested(rwlock_t *lock, int subclass) ;
void __attribute__((__section__(".spinlock.text"))) _raw_read_lock_bh(rwlock_t *lock) ;
void __attribute__((__section__(".spinlock.text"))) _raw_write_lock_bh(rwlock_t *lock) ;
void __attribute__((__section__(".spinlock.text"))) _raw_read_lock_irq(rwlock_t *lock) ;
void __attribute__((__section__(".spinlock.text"))) _raw_write_lock_irq(rwlock_t *lock) ;
unsigned long __attribute__((__section__(".spinlock.text"))) _raw_read_lock_irqsave(rwlock_t *lock)
;
unsigned long __attribute__((__section__(".spinlock.text"))) _raw_write_lock_irqsave(rwlock_t *lock)
;
int __attribute__((__section__(".spinlock.text"))) _raw_read_trylock(rwlock_t *lock);
int __attribute__((__section__(".spinlock.text"))) _raw_write_trylock(rwlock_t *lock);
void __attribute__((__section__(".spinlock.text"))) _raw_read_unlock(rwlock_t *lock) ;
void __attribute__((__section__(".spinlock.text"))) _raw_write_unlock(rwlock_t *lock) ;
void __attribute__((__section__(".spinlock.text"))) _raw_read_unlock_bh(rwlock_t *lock) ;
void __attribute__((__section__(".spinlock.text"))) _raw_write_unlock_bh(rwlock_t *lock) ;
void __attribute__((__section__(".spinlock.text"))) _raw_read_unlock_irq(rwlock_t *lock) ;
void __attribute__((__section__(".spinlock.text"))) _raw_write_unlock_irq(rwlock_t *lock) ;
void __attribute__((__section__(".spinlock.text")))
_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
;
void __attribute__((__section__(".spinlock.text")))
_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
;
# 118 "./include/linux/rwlock_api_smp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __raw_read_trylock(rwlock_t *lock)
{
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
if (do_raw_read_trylock(lock)) {
do { if (read_lock_is_recursive()) lock_acquire(&lock->dep_map, 0, 1, 2, 1, ((void *)0), (unsigned long)__builtin_return_address(0)); else lock_acquire(&lock->dep_map, 0, 1, 1, 1, ((void *)0), (unsigned long)__builtin_return_address(0)); } while (0);
return 1;
}
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __raw_write_trylock(rwlock_t *lock)
{
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
if (do_raw_write_trylock(lock)) {
lock_acquire(&lock->dep_map, 0, 1, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
return 1;
}
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
return 0;
}
# 147 "./include/linux/rwlock_api_smp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_read_lock(rwlock_t *lock)
{
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
do { if (read_lock_is_recursive()) lock_acquire(&lock->dep_map, 0, 0, 2, 1, ((void *)0), (unsigned long)__builtin_return_address(0)); else lock_acquire(&lock->dep_map, 0, 0, 1, 1, ((void *)0), (unsigned long)__builtin_return_address(0)); } while (0);
do_raw_read_lock(lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
{
unsigned long flags;

do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); if (!({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) trace_hardirqs_off(); } while (0);
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
do { if (read_lock_is_recursive()) lock_acquire(&lock->dep_map, 0, 0, 2, 1, ((void *)0), (unsigned long)__builtin_return_address(0)); else lock_acquire(&lock->dep_map, 0, 0, 1, 1, ((void *)0), (unsigned long)__builtin_return_address(0)); } while (0);
do_raw_read_lock(lock);
return flags;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_read_lock_irq(rwlock_t *lock)
{
do { bool was_disabled = (arch_irqs_disabled()); arch_local_irq_disable(); if (!was_disabled) trace_hardirqs_off(); } while (0);
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
do { if (read_lock_is_recursive()) lock_acquire(&lock->dep_map, 0, 0, 2, 1, ((void *)0), (unsigned long)__builtin_return_address(0)); else lock_acquire(&lock->dep_map, 0, 0, 1, 1, ((void *)0), (unsigned long)__builtin_return_address(0)); } while (0);
do_raw_read_lock(lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_read_lock_bh(rwlock_t *lock)
{
__local_bh_disable_ip((unsigned long)__builtin_return_address(0), ((2 * (1UL << (0 + 8))) + (1UL << 0)));
do { if (read_lock_is_recursive()) lock_acquire(&lock->dep_map, 0, 0, 2, 1, ((void *)0), (unsigned long)__builtin_return_address(0)); else lock_acquire(&lock->dep_map, 0, 0, 1, 1, ((void *)0), (unsigned long)__builtin_return_address(0)); } while (0);
do_raw_read_lock(lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
{
unsigned long flags;

do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); if (!({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) trace_hardirqs_off(); } while (0);
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
lock_acquire(&lock->dep_map, 0, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
do_raw_write_lock(lock);
return flags;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_write_lock_irq(rwlock_t *lock)
{
do { bool was_disabled = (arch_irqs_disabled()); arch_local_irq_disable(); if (!was_disabled) trace_hardirqs_off(); } while (0);
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
lock_acquire(&lock->dep_map, 0, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
do_raw_write_lock(lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_write_lock_bh(rwlock_t *lock)
{
__local_bh_disable_ip((unsigned long)__builtin_return_address(0), ((2 * (1UL << (0 + 8))) + (1UL << 0)));
lock_acquire(&lock->dep_map, 0, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
do_raw_write_lock(lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_write_lock(rwlock_t *lock)
{
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
lock_acquire(&lock->dep_map, 0, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
do_raw_write_lock(lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_write_lock_nested(rwlock_t *lock, int subclass)
{
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
lock_acquire(&lock->dep_map, subclass, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
do_raw_write_lock(lock);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_write_unlock(rwlock_t *lock)
{
lock_release(&lock->dep_map, (unsigned long)__builtin_return_address(0));
do_raw_write_unlock(lock);
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_read_unlock(rwlock_t *lock)
{
lock_release(&lock->dep_map, (unsigned long)__builtin_return_address(0));
do_raw_read_unlock(lock);
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
lock_release(&lock->dep_map, (unsigned long)__builtin_return_address(0));
do_raw_read_unlock(lock);
do { if (!({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(flags); } while (0); } while (0);
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_read_unlock_irq(rwlock_t *lock)
{
lock_release(&lock->dep_map, (unsigned long)__builtin_return_address(0));
do_raw_read_unlock(lock);
do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0);
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_read_unlock_bh(rwlock_t *lock)
{
lock_release(&lock->dep_map, (unsigned long)__builtin_return_address(0));
do_raw_read_unlock(lock);
__local_bh_enable_ip((unsigned long)__builtin_return_address(0), ((2 * (1UL << (0 + 8))) + (1UL << 0)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_write_unlock_irqrestore(rwlock_t *lock,
unsigned long flags)
{
lock_release(&lock->dep_map, (unsigned long)__builtin_return_address(0));
do_raw_write_unlock(lock);
do { if (!({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(flags); } while (0); } while (0);
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_write_unlock_irq(rwlock_t *lock)
{
lock_release(&lock->dep_map, (unsigned long)__builtin_return_address(0));
do_raw_write_unlock(lock);
do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0);
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_write_unlock_bh(rwlock_t *lock)
{
lock_release(&lock->dep_map, (unsigned long)__builtin_return_address(0));
do_raw_write_unlock(lock);
__local_bh_enable_ip((unsigned long)__builtin_return_address(0), ((2 * (1UL << (0 + 8))) + (1UL << 0)));
}
# 184 "./include/linux/spinlock_api_smp.h" 2
# 311 "./include/linux/spinlock.h" 2
# 322 "./include/linux/spinlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
return &lock->rlock;
}
# 347 "./include/linux/spinlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void spin_lock(spinlock_t *lock)
{
_raw_spin_lock(&lock->rlock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void spin_lock_bh(spinlock_t *lock)
{
_raw_spin_lock_bh(&lock->rlock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int spin_trylock(spinlock_t *lock)
{
return (_raw_spin_trylock(&lock->rlock));
}
# 372 "./include/linux/spinlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void spin_lock_irq(spinlock_t *lock)
{
_raw_spin_lock_irq(&lock->rlock);
}
# 387 "./include/linux/spinlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void spin_unlock(spinlock_t *lock)
{
_raw_spin_unlock(&lock->rlock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void spin_unlock_bh(spinlock_t *lock)
{
_raw_spin_unlock_bh(&lock->rlock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void spin_unlock_irq(spinlock_t *lock)
{
_raw_spin_unlock_irq(&lock->rlock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _raw_spin_unlock_irqrestore(&lock->rlock, flags); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int spin_trylock_bh(spinlock_t *lock)
{
return (_raw_spin_trylock_bh(&lock->rlock));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int spin_trylock_irq(spinlock_t *lock)
{
return ({ do { bool was_disabled = (arch_irqs_disabled()); arch_local_irq_disable(); if (!was_disabled) trace_hardirqs_off(); } while (0); (_raw_spin_trylock(&lock->rlock)) ? 1 : ({ do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0); 0; }); });
}
# 440 "./include/linux/spinlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int spin_is_locked(spinlock_t *lock)
{
return (({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_62(void) ; if (!((sizeof((&(&lock->rlock)->raw_lock)->lock) == sizeof(char) || sizeof((&(&lock->rlock)->raw_lock)->lock) == sizeof(short) || sizeof((&(&lock->rlock)->raw_lock)->lock) == sizeof(int) || sizeof((&(&lock->rlock)->raw_lock)->lock) == sizeof(long)) || sizeof((&(&lock->rlock)->raw_lock)->lock) == sizeof(long long))) __compiletime_assert_62(); } while (0); (*(const volatile typeof( _Generic(((&(&lock->rlock)->raw_lock)->lock), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&(&lock->rlock)->raw_lock)->lock))) *)&((&(&lock->rlock)->raw_lock)->lock)); }) != 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int spin_is_contended(spinlock_t *lock)
{
return (((void)(&lock->rlock), 0));
}
# 469 "./include/linux/spinlock.h"
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);



extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
unsigned long *flags);



int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
size_t max_size, unsigned int cpu_mult,
gfp_t gfp, const char *name,
struct lock_class_key *key);
# 493 "./include/linux/spinlock.h"
void free_bucket_spinlocks(spinlock_t *locks);
# 17 "./include/linux/kref.h" 2
# 1 "./include/linux/refcount.h" 1
# 101 "./include/linux/refcount.h"
struct mutex;
# 111 "./include/linux/refcount.h"
typedef struct refcount_struct {
atomic_t refs;
} refcount_t;





enum refcount_saturation_type {
REFCOUNT_ADD_NOT_ZERO_OVF,
REFCOUNT_ADD_OVF,
REFCOUNT_ADD_UAF,
REFCOUNT_SUB_UAF,
REFCOUNT_DEC_LEAK,
};

void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t);






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void refcount_set(refcount_t *r, int n)
{
atomic_set(&r->refs, n);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int refcount_read(const refcount_t *r)
{
return atomic_read(&r->refs);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__)) bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
{
int old = refcount_read(r);

do {
if (!old)
break;
} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));

if (oldp)
*oldp = old;

if (__builtin_expect(!!(old < 0 || old + i < 0), 0))
refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);

return old;
}
# 186 "./include/linux/refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__)) bool refcount_add_not_zero(int i, refcount_t *r)
{
return __refcount_add_not_zero(i, r, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __refcount_add(int i, refcount_t *r, int *oldp)
{
int old = atomic_fetch_add_relaxed(i, &r->refs);

if (oldp)
*oldp = old;

if (__builtin_expect(!!(!old), 0))
refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
else if (__builtin_expect(!!(old < 0 || old + i < 0), 0))
refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}
# 220 "./include/linux/refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void refcount_add(int i, refcount_t *r)
{
__refcount_add(i, r, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__)) bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
{
return __refcount_add_not_zero(1, r, oldp);
}
# 243 "./include/linux/refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__)) bool refcount_inc_not_zero(refcount_t *r)
{
return __refcount_inc_not_zero(r, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __refcount_inc(refcount_t *r, int *oldp)
{
__refcount_add(1, r, oldp);
}
# 265 "./include/linux/refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void refcount_inc(refcount_t *r)
{
__refcount_inc(r, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__)) bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
{
int old = atomic_fetch_sub_release(i, &r->refs);

if (oldp)
*oldp = old;

if (old == i) {
do { do { } while (0); __asm__ __volatile__ ("fence " "r" "," "r" : : : "memory"); } while (0);
return true;
}

if (__builtin_expect(!!(old < 0 || old - i < 0), 0))
refcount_warn_saturate(r, REFCOUNT_SUB_UAF);

return false;
}
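/*
 * refcount_t differs from bare atomic_t in that over- and underflow
 * do not wrap silently: each primitive above checks the old value and
 * calls refcount_warn_saturate() with the matching saturation type.
 * The "fence r , r" emitted when the count hits zero is
 * smp_acquire__after_ctrl_dep(), upgrading the release-ordered
 * atomic_fetch_sub_release() to acquire semantics on the free path.
 */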
# 308 "./include/linux/refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__)) bool refcount_sub_and_test(int i, refcount_t *r)
{
return __refcount_sub_and_test(i, r, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__)) bool __refcount_dec_and_test(refcount_t *r, int *oldp)
{
return __refcount_sub_and_test(1, r, oldp);
}
# 331 "./include/linux/refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__)) bool refcount_dec_and_test(refcount_t *r)
{
return __refcount_dec_and_test(r, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __refcount_dec(refcount_t *r, int *oldp)
{
int old = atomic_fetch_sub_release(1, &r->refs);

if (oldp)
*oldp = old;

if (__builtin_expect(!!(old <= 1), 0))
refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}
# 357 "./include/linux/refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void refcount_dec(refcount_t *r)
{
__refcount_dec(r, ((void *)0));
}

extern __attribute__((__warn_unused_result__)) bool refcount_dec_if_one(refcount_t *r);
extern __attribute__((__warn_unused_result__)) bool refcount_dec_not_one(refcount_t *r);
extern __attribute__((__warn_unused_result__)) bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
extern __attribute__((__warn_unused_result__)) bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
extern __attribute__((__warn_unused_result__)) bool refcount_dec_and_lock_irqsave(refcount_t *r,
spinlock_t *lock,
unsigned long *flags);
# 18 "./include/linux/kref.h" 2

struct kref {
refcount_t refcount;
};







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kref_init(struct kref *kref)
{
refcount_set(&kref->refcount, 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int kref_read(const struct kref *kref)
{
return refcount_read(&kref->refcount);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kref_get(struct kref *kref)
{
refcount_inc(&kref->refcount);
}
# 62 "./include/linux/kref.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
if (refcount_dec_and_test(&kref->refcount)) {
release(kref);
return 1;
}
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *lock)
{
if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) {
release(kref);
return 1;
}
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int kref_put_lock(struct kref *kref,
void (*release)(struct kref *kref),
spinlock_t *lock)
{
if (refcount_dec_and_lock(&kref->refcount, lock)) {
release(kref);
return 1;
}
return 0;
}
# 109 "./include/linux/kref.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) kref_get_unless_zero(struct kref *kref)
{
return refcount_inc_not_zero(&kref->refcount);
}
# 9 "./include/linux/mm_types.h" 2


# 1 "./include/linux/rbtree.h" 1
# 20 "./include/linux/rbtree.h"
# 1 "./include/linux/rbtree_types.h" 1




struct rb_node {
unsigned long __rb_parent_color;
struct rb_node *rb_right;
struct rb_node *rb_left;
} __attribute__((aligned(sizeof(long))));


struct rb_root {
struct rb_node *rb_node;
};
# 26 "./include/linux/rbtree_types.h"
struct rb_root_cached {
struct rb_root rb_root;
struct rb_node *rb_leftmost;
};
# 21 "./include/linux/rbtree.h" 2



# 1 "./include/linux/rcupdate.h" 1
# 40 "./include/linux/rcupdate.h"
void call_rcu(struct callback_head *head, rcu_callback_t func);
void rcu_barrier_tasks(void);
void rcu_barrier_tasks_rude(void);
void synchronize_rcu(void);
# 63 "./include/linux/rcupdate.h"
void rcu_read_unlock_strict(void);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __rcu_read_lock(void)
{
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __rcu_read_unlock(void)
{
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
if (0)
rcu_read_unlock_strict();
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int rcu_preempt_depth(void)
{
return 0;
}




void rcu_init(void);
extern int rcu_scheduler_active;
void rcu_sched_clock_irq(int user);
void rcu_report_dead(unsigned int cpu);
void rcutree_migrate_callbacks(int cpu);


void rcu_init_tasks_generic(void);





void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
# 110 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rcu_user_enter(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rcu_user_exit(void) { }
# 120 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rcu_init_nohz(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int rcu_nocb_cpu_offload(int cpu) { return -22; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int rcu_nocb_cpu_deoffload(int cpu) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rcu_nocb_flush_deferred_wakeup(void) { }
# 191 "./include/linux/rcupdate.h"
void call_rcu_tasks_rude(struct callback_head *head, rcu_callback_t func);
void synchronize_rcu_tasks_rude(void);



void exit_tasks_rcu_start(void);
void exit_tasks_rcu_finish(void);
# 226 "./include/linux/rcupdate.h"
# 1 "./include/linux/rcutree.h" 1
# 20 "./include/linux/rcutree.h"
void rcu_softirq_qs(void);
void rcu_note_context_switch(bool preempt);
int rcu_needs_cpu(void);
void rcu_cpu_stall_reset(void);






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rcu_virt_note_context_switch(int cpu)
{
rcu_note_context_switch(false);
}

void synchronize_rcu_expedited(void);
void kvfree_call_rcu(struct callback_head *head, rcu_callback_t func);

void rcu_barrier(void);
bool rcu_eqs_special_set(int cpu);
void rcu_momentary_dyntick_idle(void);
void kfree_rcu_scheduler_running(void);
bool rcu_gp_might_be_stalled(void);
unsigned long get_state_synchronize_rcu(void);
unsigned long start_poll_synchronize_rcu(void);
bool poll_state_synchronize_rcu(unsigned long oldstate);
void cond_synchronize_rcu(unsigned long oldstate);

void rcu_idle_enter(void);
void rcu_idle_exit(void);
void rcu_irq_enter(void);
void rcu_irq_exit(void);
void rcu_irq_enter_irqson(void);
void rcu_irq_exit_irqson(void);
bool rcu_is_idle_cpu(int cpu);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rcu_irq_exit_check_preempt(void) { }


void exit_rcu(void);

void rcu_scheduler_starting(void);
extern int rcu_scheduler_active;
void rcu_end_inkernel_boot(void);
bool rcu_inkernel_boot_has_ended(void);
bool rcu_is_watching(void);

void rcu_all_qs(void);



int rcutree_prepare_cpu(unsigned int cpu);
int rcutree_online_cpu(unsigned int cpu);
int rcutree_offline_cpu(unsigned int cpu);
int rcutree_dead_cpu(unsigned int cpu);
int rcutree_dying_cpu(unsigned int cpu);
void rcu_cpu_starting(unsigned int cpu);
# 227 "./include/linux/rcupdate.h" 2
# 247 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void init_rcu_head(struct callback_head *head) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void destroy_rcu_head(struct callback_head *head) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void init_rcu_head_on_stack(struct callback_head *head) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void destroy_rcu_head_on_stack(struct callback_head *head) { }





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool rcu_lockdep_current_cpu_online(void) { return true; }


extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern struct lockdep_map rcu_callback_map;



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rcu_lock_acquire(struct lockdep_map *map)
{
lock_acquire(map, 0, 0, 2, 0, ((void *)0), ({ __label__ __here; __here: (unsigned long)&&__here; }));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rcu_lock_release(struct lockdep_map *map)
{
lock_release(map, ({ __label__ __here; __here: (unsigned long)&&__here; }));
}

int debug_lockdep_rcu_enabled(void);
int rcu_read_lock_held(void);
int rcu_read_lock_bh_held(void);
int rcu_read_lock_sched_held(void);
int rcu_read_lock_any_held(void);
# 690 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void rcu_read_lock(void)
{
__rcu_read_lock();
(void)0;
rcu_lock_acquire(&rcu_lock_map);
do { } while (0 && (!rcu_is_watching()));

}
# 721 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rcu_read_unlock(void)
{
do { } while (0 && (!rcu_is_watching()));

(void)0;
__rcu_read_unlock();
rcu_lock_release(&rcu_lock_map);
}
# 744 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rcu_read_lock_bh(void)
{
local_bh_disable();
(void)0;
rcu_lock_acquire(&rcu_bh_lock_map);
do { } while (0 && (!rcu_is_watching()));

}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rcu_read_unlock_bh(void)
{
do { } while (0 && (!rcu_is_watching()));

rcu_lock_release(&rcu_bh_lock_map);
(void)0;
local_bh_enable();
}
# 782 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rcu_read_lock_sched(void)
{
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
(void)0;
rcu_lock_acquire(&rcu_sched_lock_map);
do { } while (0 && (!rcu_is_watching()));

}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((patchable_function_entry(0, 0))) void rcu_read_lock_sched_notrace(void)
{
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
(void)0;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rcu_read_unlock_sched(void)
{
do { } while (0 && (!rcu_is_watching()));

rcu_lock_release(&rcu_sched_lock_map);
(void)0;
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((patchable_function_entry(0, 0))) void rcu_read_unlock_sched_notrace(void)
{
(void)0;
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
}
# 982 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rcu_head_init(struct callback_head *rhp)
{
rhp->func = (rcu_callback_t)~0L;
}
# 1000 "./include/linux/rcupdate.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
rcu_head_after_call_rcu(struct callback_head *rhp, rcu_callback_t f)
{
rcu_callback_t func = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_63(void) ; if (!((sizeof(rhp->func) == sizeof(char) || sizeof(rhp->func) == sizeof(short) || sizeof(rhp->func) == sizeof(int) || sizeof(rhp->func) == sizeof(long)) || sizeof(rhp->func) == sizeof(long long))) __compiletime_assert_63(); } while (0); (*(const volatile typeof( _Generic((rhp->func), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (rhp->func))) *)&(rhp->func)); });

if (func == f)
return true;
({ int __ret_warn_on = !!(func != (rcu_callback_t)~0L); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/rcupdate.h"), "i" (1007), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
return false;
}


extern int rcu_expedited;
extern int rcu_normal;
# 25 "./include/linux/rbtree.h" 2
# 39 "./include/linux/rbtree.h"
extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);



extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);


extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);


extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rb_link_node(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
{
node->__rb_parent_color = (unsigned long)parent;
node->rb_left = node->rb_right = ((void *)0);

*rb_link = node;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
{
node->__rb_parent_color = (unsigned long)parent;
node->rb_left = node->rb_right = ((void *)0);

do { uintptr_t _r_a_p__v = (uintptr_t)(node); ; if (__builtin_constant_p(node) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_64(void) ; if (!((sizeof((*rb_link)) == sizeof(char) || sizeof((*rb_link)) == sizeof(short) || sizeof((*rb_link)) == sizeof(int) || sizeof((*rb_link)) == sizeof(long)) || sizeof((*rb_link)) == sizeof(long long))) __compiletime_assert_64(); } while (0); do { *(volatile typeof((*rb_link)) *)&((*rb_link)) = ((typeof(*rb_link))(_r_a_p__v)); } while (0); } while (0); else do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_65(void) ; if (!((sizeof(*&*rb_link) == sizeof(char) || sizeof(*&*rb_link) == sizeof(short) || sizeof(*&*rb_link) == sizeof(int) || sizeof(*&*rb_link) == sizeof(long)))) __compiletime_assert_65(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_66(void) ; if (!((sizeof(*&*rb_link) == sizeof(char) || sizeof(*&*rb_link) == sizeof(short) || sizeof(*&*rb_link) == sizeof(int) || sizeof(*&*rb_link) == sizeof(long)) || sizeof(*&*rb_link) == sizeof(long long))) __compiletime_assert_66(); } while (0); do { *(volatile typeof(*&*rb_link) *)&(*&*rb_link) = ((typeof(*((typeof(*rb_link))_r_a_p__v)) *)((typeof(*rb_link))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); } while (0);
}
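/*
 * The expansion in rb_link_node_rcu() above is rcu_assign_pointer():
 * a constant-NULL store takes the plain WRITE_ONCE() fast path, while
 * publishing a real node issues the "fence rw, w" release barrier
 * first, so concurrent RCU readers that fetch *rb_link observe a
 * fully initialised node.
 */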
# 108 "./include/linux/rbtree.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rb_insert_color_cached(struct rb_node *node,
struct rb_root_cached *root,
bool leftmost)
{
if (leftmost)
root->rb_leftmost = node;
rb_insert_color(node, &root->rb_root);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct rb_node *
rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
{
struct rb_node *leftmost = ((void *)0);

if (root->rb_leftmost == node)
leftmost = root->rb_leftmost = rb_next(node);

rb_erase(node, &root->rb_root);

return leftmost;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rb_replace_node_cached(struct rb_node *victim,
struct rb_node *new,
struct rb_root_cached *root)
{
if (root->rb_leftmost == victim)
root->rb_leftmost = new;
rb_replace_node(victim, new, &root->rb_root);
}
# 164 "./include/linux/rbtree.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) struct rb_node *
rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,
bool (*less)(struct rb_node *, const struct rb_node *))
{
struct rb_node **link = &tree->rb_root.rb_node;
struct rb_node *parent = ((void *)0);
bool leftmost = true;

while (*link) {
parent = *link;
if (less(node, parent)) {
link = &parent->rb_left;
} else {
link = &parent->rb_right;
leftmost = false;
}
}

rb_link_node(node, parent, link);
rb_insert_color_cached(node, tree, leftmost);

return leftmost ? node : ((void *)0);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
rb_add(struct rb_node *node, struct rb_root *tree,
bool (*less)(struct rb_node *, const struct rb_node *))
{
struct rb_node **link = &tree->rb_node;
struct rb_node *parent = ((void *)0);

while (*link) {
parent = *link;
if (less(node, parent))
link = &parent->rb_left;
else
link = &parent->rb_right;
}

rb_link_node(node, parent, link);
rb_insert_color(node, tree);
}
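/*
 * [added sketch, hypothetical, not in the original .i] Minimal rb_add()
 * usage under the stated assumptions: embed an rb_node in your struct,
 * supply a "less" callback, and rb_add() does the walk, link and
 * rebalance. All __ex_* names are invented for illustration.
 */
struct __ex_item {
struct rb_node node;
unsigned long key;
};

static __attribute__((__unused__)) bool __ex_less(struct rb_node *a, const struct rb_node *b)
{
/* open-coded container_of(): recover the embedding structs */
const struct __ex_item *ia = (const struct __ex_item *)((const char *)a - __builtin_offsetof(struct __ex_item, node));
const struct __ex_item *ib = (const struct __ex_item *)((const char *)b - __builtin_offsetof(struct __ex_item, node));

return ia->key < ib->key;
}

static __attribute__((__unused__)) void __ex_insert(struct __ex_item *item, struct rb_root *tree)
{
rb_add(&item->node, tree, __ex_less);
}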
# 222 "./include/linux/rbtree.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) struct rb_node *
rb_find_add(struct rb_node *node, struct rb_root *tree,
int (*cmp)(struct rb_node *, const struct rb_node *))
{
struct rb_node **link = &tree->rb_node;
struct rb_node *parent = ((void *)0);
int c;

while (*link) {
parent = *link;
c = cmp(node, parent);

if (c < 0)
link = &parent->rb_left;
else if (c > 0)
link = &parent->rb_right;
else
return parent;
}

rb_link_node(node, parent, link);
rb_insert_color(node, tree);
return ((void *)0);
}
# 255 "./include/linux/rbtree.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) struct rb_node *
rb_find(const void *key, const struct rb_root *tree,
int (*cmp)(const void *key, const struct rb_node *))
{
struct rb_node *node = tree->rb_node;

while (node) {
int c = cmp(key, node);

if (c < 0)
node = node->rb_left;
else if (c > 0)
node = node->rb_right;
else
return node;
}

return ((void *)0);
}
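/*
 * [added annotation, not in the original .i] rb_find()'s cmp() follows the
 * bsearch()/memcmp() convention: negative means the key sorts before the
 * node (descend left), positive means after (descend right), zero matches.
 */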
# 283 "./include/linux/rbtree.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) struct rb_node *
rb_find_first(const void *key, const struct rb_root *tree,
int (*cmp)(const void *key, const struct rb_node *))
{
struct rb_node *node = tree->rb_node;
struct rb_node *match = ((void *)0);

while (node) {
int c = cmp(key, node);

if (c <= 0) {
if (!c)
match = node;
node = node->rb_left;
} else if (c > 0) {
node = node->rb_right;
}
}

return match;
}
# 313 "./include/linux/rbtree.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) struct rb_node *
rb_next_match(const void *key, struct rb_node *node,
int (*cmp)(const void *key, const struct rb_node *))
{
node = rb_next(node);
if (node && cmp(key, node))
node = ((void *)0);
return node;
}
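/*
 * [added annotation, not in the original .i] rb_find_first() keeps
 * descending left even after a match, so with duplicate keys it returns
 * the leftmost one; rb_next_match() then steps through the remaining
 * equal-key nodes until cmp() becomes non-zero.
 */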
# 12 "./include/linux/mm_types.h" 2
# 1 "./include/linux/rwsem.h" 1
# 32 "./include/linux/rwsem.h"
# 1 "./include/linux/osq_lock.h" 1








struct optimistic_spin_node {
struct optimistic_spin_node *next, *prev;
int locked;
int cpu;
};

struct optimistic_spin_queue {




atomic_t tail;
};






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void osq_lock_init(struct optimistic_spin_queue *lock)
{
atomic_set(&lock->tail, (0));
}

extern bool osq_lock(struct optimistic_spin_queue *lock);
extern void osq_unlock(struct optimistic_spin_queue *lock);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool osq_is_locked(struct optimistic_spin_queue *lock)
{
return atomic_read(&lock->tail) != (0);
}
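/*
 * [added annotation, not in the original .i] This is the MCS-style
 * optimistic spin queue used by rwsem (and mutex) spinners; the bare (0)
 * in osq_lock_init()/osq_is_locked() is OSQ_UNLOCKED_VAL with its
 * #define already stripped.
 */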
# 33 "./include/linux/rwsem.h" 2
# 47 "./include/linux/rwsem.h"
struct rw_semaphore {
atomic_long_t count;





atomic_long_t owner;

struct optimistic_spin_queue osq;

raw_spinlock_t wait_lock;
struct list_head wait_list;




struct lockdep_map dep_map;

};


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int rwsem_is_locked(struct rw_semaphore *sem)
{
return atomic_long_read(&sem->count) != 0;
}
# 103 "./include/linux/rwsem.h"
extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
struct lock_class_key *key);
# 119 "./include/linux/rwsem.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int rwsem_is_contended(struct rw_semaphore *sem)
{
return !list_empty(&sem->wait_list);
}
# 174 "./include/linux/rwsem.h"
extern void down_read(struct rw_semaphore *sem);
extern int __attribute__((__warn_unused_result__)) down_read_interruptible(struct rw_semaphore *sem);
extern int __attribute__((__warn_unused_result__)) down_read_killable(struct rw_semaphore *sem);




extern int down_read_trylock(struct rw_semaphore *sem);




extern void down_write(struct rw_semaphore *sem);
extern int __attribute__((__warn_unused_result__)) down_write_killable(struct rw_semaphore *sem);




extern int down_write_trylock(struct rw_semaphore *sem);




extern void up_read(struct rw_semaphore *sem);




extern void up_write(struct rw_semaphore *sem);




extern void downgrade_write(struct rw_semaphore *sem);
# 223 "./include/linux/rwsem.h"
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern int __attribute__((__warn_unused_result__)) down_read_killable_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
# 241 "./include/linux/rwsem.h"
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
# 13 "./include/linux/mm_types.h" 2
# 1 "./include/linux/completion.h" 1
# 12 "./include/linux/completion.h"
# 1 "./include/linux/swait.h" 1







# 1 "./include/linux/wait.h" 1
# 12 "./include/linux/wait.h"
# 1 "./include/uapi/linux/wait.h" 1
# 13 "./include/linux/wait.h" 2

typedef struct wait_queue_entry wait_queue_entry_t;

typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
# 30 "./include/linux/wait.h"
struct wait_queue_entry {
unsigned int flags;
void *private;
wait_queue_func_t func;
struct list_head entry;
};

struct wait_queue_head {
spinlock_t lock;
struct list_head head;
};
typedef struct wait_queue_head wait_queue_head_t;

struct task_struct;
# 64 "./include/linux/wait.h"
extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);
# 82 "./include/linux/wait.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
{
wq_entry->flags = 0;
wq_entry->private = p;
wq_entry->func = default_wake_function;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
{
wq_entry->flags = 0;
wq_entry->private = ((void *)0);
wq_entry->func = func;
}
# 127 "./include/linux/wait.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int waitqueue_active(struct wait_queue_head *wq_head)
{
return !list_empty(&wq_head->head);
}
# 140 "./include/linux/wait.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
return list_is_singular(&wq_head->head);
}
# 153 "./include/linux/wait.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool wq_has_sleeper(struct wait_queue_head *wq_head)
{







do { do { } while (0); __asm__ __volatile__ ("fence " "rw" "," "rw" : : : "memory"); } while (0);
return waitqueue_active(wq_head);
}
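/*
 * [added annotation, not in the original .i] The do { ... "fence rw,rw"
 * ... } block is the expanded smp_mb(); it pairs with the barrier in
 * set_current_state() so a waker observes the waiter on the queue before
 * re-testing the wait condition.
 */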

extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
struct list_head *head = &wq_head->head;
struct wait_queue_entry *wq;

for (wq = ({ void *__mptr = (void *)((&wq_head->head)->next); _Static_assert(__builtin_types_compatible_p(typeof(*((&wq_head->head)->next)), typeof(((typeof(*wq) *)0)->entry)) || __builtin_types_compatible_p(typeof(*((&wq_head->head)->next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*wq) *)(__mptr - __builtin_offsetof(typeof(*wq), entry))); }); !(&wq->entry == (&wq_head->head)); wq = ({ void *__mptr = (void *)((wq)->entry.next); _Static_assert(__builtin_types_compatible_p(typeof(*((wq)->entry.next)), typeof(((typeof(*(wq)) *)0)->entry)) || __builtin_types_compatible_p(typeof(*((wq)->entry.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(wq)) *)(__mptr - __builtin_offsetof(typeof(*(wq)), entry))); })) {
if (!(wq->flags & 0x20))
break;
head = &wq->entry;
}
list_add(&wq_entry->entry, head);
}
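/*
 * [added annotation, not in the original .i] 0x20 is WQ_FLAG_PRIORITY
 * with the #define stripped; the expanded list_for_each_entry() loop
 * skips past priority entries so the new waiter is linked in behind them.
 */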




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
wq_entry->flags |= 0x01;
__add_wait_queue(wq_head, wq_entry);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
list_add_tail(&wq_entry->entry, &wq_head->head);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
wq_entry->flags |= 0x01;
__add_wait_queue_entry_tail(wq_head, wq_entry);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
list_del(&wq_entry->entry);
}

void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
void __wake_up_pollfree(struct wait_queue_head *wq_head);
# 261 "./include/linux/wait.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void wake_up_pollfree(struct wait_queue_head *wq_head)
{







if (waitqueue_active(wq_head))
__wake_up_pollfree(wq_head);
}
# 286 "./include/linux/wait.h"
extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
# 770 "./include/linux/wait.h"
extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
# 1164 "./include/linux/wait.h"
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
# 1189 "./include/linux/wait.h"
typedef int (*task_call_f)(struct task_struct *p, void *arg);
extern int task_call_func(struct task_struct *p, task_call_f func, void *arg);
# 9 "./include/linux/swait.h" 2
# 41 "./include/linux/swait.h"
struct task_struct;

struct swait_queue_head {
raw_spinlock_t lock;
struct list_head task_list;
};

struct swait_queue {
struct task_struct *task;
struct list_head task_list;
};
# 69 "./include/linux/swait.h"
extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
struct lock_class_key *key);
# 121 "./include/linux/swait.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int swait_active(struct swait_queue_head *wq)
{
return !list_empty(&wq->task_list);
}
# 134 "./include/linux/swait.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool swq_has_sleeper(struct swait_queue_head *wq)
{







do { do { } while (0); __asm__ __volatile__ ("fence " "rw" "," "rw" : : : "memory"); } while (0);
return swait_active(wq);
}

extern void swake_up_one(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
extern void swake_up_locked(struct swait_queue_head *q);

extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);

extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
# 13 "./include/linux/completion.h" 2
# 26 "./include/linux/completion.h"
struct completion {
unsigned int done;
struct swait_queue_head wait;
};


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void complete_acquire(struct completion *x) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void complete_release(struct completion *x) {}
# 84 "./include/linux/completion.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void init_completion(struct completion *x)
{
x->done = 0;
do { static struct lock_class_key __key; __init_swait_queue_head((&x->wait), "&x->wait", &__key); } while (0);
}
# 97 "./include/linux/completion.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void reinit_completion(struct completion *x)
{
x->done = 0;
}

extern void wait_for_completion(struct completion *);
extern void wait_for_completion_io(struct completion *);
extern int wait_for_completion_interruptible(struct completion *x);
extern int wait_for_completion_killable(struct completion *x);
extern unsigned long wait_for_completion_timeout(struct completion *x,
unsigned long timeout);
extern unsigned long wait_for_completion_io_timeout(struct completion *x,
unsigned long timeout);
extern long wait_for_completion_interruptible_timeout(
struct completion *x, unsigned long timeout);
extern long wait_for_completion_killable_timeout(
struct completion *x, unsigned long timeout);
extern bool try_wait_for_completion(struct completion *x);
extern bool completion_done(struct completion *x);

extern void complete(struct completion *);
extern void complete_all(struct completion *);
# 14 "./include/linux/mm_types.h" 2

# 1 "./include/linux/uprobes.h" 1
# 19 "./include/linux/uprobes.h"
struct vm_area_struct;
struct mm_struct;
struct inode;
struct notifier_block;
struct page;






enum uprobe_filter_ctx {
UPROBE_FILTER_REGISTER,
UPROBE_FILTER_UNREGISTER,
UPROBE_FILTER_MMAP,
};

struct uprobe_consumer {
int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
int (*ret_handler)(struct uprobe_consumer *self,
unsigned long func,
struct pt_regs *regs);
bool (*filter)(struct uprobe_consumer *self,
enum uprobe_filter_ctx ctx,
struct mm_struct *mm);

struct uprobe_consumer *next;
};



# 1 "./arch/riscv/include/asm/uprobes.h" 1





# 1 "./arch/riscv/include/asm/probes.h" 1





typedef u32 probe_opcode_t;
typedef bool (probes_handler_t) (u32 opcode, unsigned long addr, struct pt_regs *);


struct arch_probe_insn {
probe_opcode_t *insn;
probes_handler_t *handler;

unsigned long restore;
};


typedef u32 kprobe_opcode_t;
struct arch_specific_insn {
struct arch_probe_insn api;
};
# 7 "./arch/riscv/include/asm/uprobes.h" 2
# 1 "./arch/riscv/include/asm/patch.h" 1








int patch_text_nosync(void *addr, const void *insns, size_t len);
int patch_text(void *addr, u32 insn);
# 8 "./arch/riscv/include/asm/uprobes.h" 2
# 21 "./arch/riscv/include/asm/uprobes.h"
typedef u32 uprobe_opcode_t;

struct arch_uprobe_task {
unsigned long saved_cause;
};

struct arch_uprobe {
union {
u8 insn[8];
u8 ixol[8];
};
struct arch_probe_insn api;
unsigned long insn_size;
bool simulate;
};

bool uprobe_breakpoint_handler(struct pt_regs *regs);
bool uprobe_single_step_handler(struct pt_regs *regs);
# 50 "./include/linux/uprobes.h" 2

enum uprobe_task_state {
UTASK_RUNNING,
UTASK_SSTEP,
UTASK_SSTEP_ACK,
UTASK_SSTEP_TRAPPED,
};




struct uprobe_task {
enum uprobe_task_state state;

union {
struct {
struct arch_uprobe_task autask;
unsigned long vaddr;
};

struct {
struct callback_head dup_xol_work;
unsigned long dup_xol_addr;
};
};

struct uprobe *active_uprobe;
unsigned long xol_vaddr;

struct return_instance *return_instances;
unsigned int depth;
};

struct return_instance {
struct uprobe *uprobe;
unsigned long func;
unsigned long stack;
unsigned long orig_ret_vaddr;
bool chained;

struct return_instance *next;
};

enum rp_check {
RP_CHECK_CALL,
RP_CHECK_CHAIN_CALL,
RP_CHECK_RET,
};

struct xol_area;

struct uprobes_state {
struct xol_area *xol_area;
};

extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) uprobes_init(void);
extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
extern bool is_swbp_insn(uprobe_opcode_t *insn);
extern bool is_trap_insn(uprobe_opcode_t *insn);
extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_mmap(struct vm_area_struct *vma);
extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void uprobe_start_dup_mmap(void);
extern void uprobe_end_dup_mmap(void);
extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
extern void uprobe_free_utask(struct task_struct *t);
extern void uprobe_copy_process(struct task_struct *t, unsigned long flags);
extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
extern void uprobe_notify_resume(struct pt_regs *regs);
extern bool uprobe_deny_signal(void);
extern bool arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs);
extern void uprobe_clear_state(struct mm_struct *mm);
extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
extern bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, struct pt_regs *regs);
extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
void *src, unsigned long len);
# 16 "./include/linux/mm_types.h" 2

# 1 "./include/linux/page-flags-layout.h" 1




# 1 "./include/linux/numa.h" 1
# 47 "./include/linux/numa.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int numa_map_to_online_node(int node)
{
return (-1);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int memory_add_physaddr_to_nid(u64 start)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int phys_to_target_node(u64 start)
{
return 0;
}
# 6 "./include/linux/page-flags-layout.h" 2
# 1 "./include/generated/bounds.h" 1
# 7 "./include/linux/page-flags-layout.h" 2
# 30 "./include/linux/page-flags-layout.h"
# 1 "./arch/riscv/include/asm/sparsemem.h" 1
# 31 "./include/linux/page-flags-layout.h" 2
# 18 "./include/linux/mm_types.h" 2
# 1 "./include/linux/workqueue.h" 1








# 1 "./include/linux/timer.h" 1





# 1 "./include/linux/ktime.h" 1
# 24 "./include/linux/ktime.h"
# 1 "./include/linux/time.h" 1








extern struct timezone sys_tz;

int get_timespec64(struct timespec64 *ts,
const struct __kernel_timespec *uts);
int put_timespec64(const struct timespec64 *ts,
struct __kernel_timespec *uts);
int get_itimerspec64(struct itimerspec64 *it,
const struct __kernel_itimerspec *uit);
int put_itimerspec64(const struct itimerspec64 *it,
struct __kernel_itimerspec *uit);

extern time64_t mktime64(const unsigned int year, const unsigned int mon,
const unsigned int day, const unsigned int hour,
const unsigned int min, const unsigned int sec);


extern void clear_itimer(void);




extern long do_utimes(int dfd, const char *filename, struct timespec64 *times, int flags);





struct tm {




int tm_sec;

int tm_min;

int tm_hour;

int tm_mday;

int tm_mon;

long tm_year;

int tm_wday;

int tm_yday;
};
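/*
 * [added annotation, not in the original .i] The blank lines inside
 * struct tm are where preprocessing dropped the per-field comments; the
 * fields carry the usual C-library meanings, with tm_year held as a long
 * counting years since 1900.
 */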

void time64_to_tm(time64_t totalsecs, int offset, struct tm *result);


# 1 "./include/linux/time32.h" 1
# 13 "./include/linux/time32.h"
# 1 "./include/linux/timex.h" 1
# 56 "./include/linux/timex.h"
# 1 "./include/uapi/linux/timex.h" 1
# 56 "./include/uapi/linux/timex.h"
# 1 "./include/linux/time.h" 1
# 57 "./include/uapi/linux/timex.h" 2
# 97 "./include/uapi/linux/timex.h"
struct __kernel_timex_timeval {
__kernel_time64_t tv_sec;
long long tv_usec;
};

struct __kernel_timex {
unsigned int modes;
int :32;
long long offset;
long long freq;
long long maxerror;
long long esterror;
int status;
int :32;
long long constant;
long long precision;
long long tolerance;


struct __kernel_timex_timeval time;
long long tick;

long long ppsfreq;
long long jitter;
int shift;
int :32;
long long stabil;
long long jitcnt;
long long calcnt;
long long errcnt;
long long stbcnt;

int tai;

int :32; int :32; int :32; int :32;
int :32; int :32; int :32; int :32;
int :32; int :32; int :32;
};
# 57 "./include/linux/timex.h" 2








# 1 "./arch/riscv/include/asm/timex.h" 1
# 11 "./arch/riscv/include/asm/timex.h"
typedef unsigned long cycles_t;
# 51 "./arch/riscv/include/asm/timex.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) cycles_t get_cycles(void)
{
return ({ register unsigned long __v; __asm__ __volatile__ ("csrr %0, " "0xc01" : "=r" (__v) : : "memory"); __v; });
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 get_cycles_hi(void)
{
return ({ register unsigned long __v; __asm__ __volatile__ ("csrr %0, " "0xc81" : "=r" (__v) : : "memory"); __v; });
}
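/*
 * [added annotation, not in the original .i] The csrr immediates are the
 * riscv timer CSRs: 0xc01 is "time" and 0xc81 is "timeh", so get_cycles()
 * reads platform timer ticks rather than a true cycle counter.
 */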





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 get_cycles64(void)
{
return get_cycles();
}
# 85 "./arch/riscv/include/asm/timex.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int read_current_timer(unsigned long *timer_val)
{
*timer_val = get_cycles();
return 0;
}

extern void time_init(void);
# 66 "./include/linux/timex.h" 2
# 139 "./include/linux/timex.h"
extern unsigned long tick_usec;
extern unsigned long tick_nsec;
# 154 "./include/linux/timex.h"
extern int do_adjtimex(struct __kernel_timex *);
extern int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex * ktx);

extern void hardpps(const struct timespec64 *, const struct timespec64 *);

int read_current_timer(unsigned long *timer_val);
# 14 "./include/linux/time32.h" 2

# 1 "./include/vdso/time32.h" 1




typedef s32 old_time32_t;

struct old_timespec32 {
old_time32_t tv_sec;
s32 tv_nsec;
};

struct old_timeval32 {
old_time32_t tv_sec;
s32 tv_usec;
};
# 16 "./include/linux/time32.h" 2

struct old_itimerspec32 {
struct old_timespec32 it_interval;
struct old_timespec32 it_value;
};

struct old_utimbuf32 {
old_time32_t actime;
old_time32_t modtime;
};

struct old_timex32 {
u32 modes;
s32 offset;
s32 freq;
s32 maxerror;
s32 esterror;
s32 status;
s32 constant;
s32 precision;
s32 tolerance;
struct old_timeval32 time;
s32 tick;
s32 ppsfreq;
s32 jitter;
s32 shift;
s32 stabil;
s32 jitcnt;
s32 calcnt;
s32 errcnt;
s32 stbcnt;
s32 tai;

s32:32; s32:32; s32:32; s32:32;
s32:32; s32:32; s32:32; s32:32;
s32:32; s32:32; s32:32;
};

extern int get_old_timespec32(struct timespec64 *, const void *);
extern int put_old_timespec32(const struct timespec64 *, void *);
extern int get_old_itimerspec32(struct itimerspec64 *its,
const struct old_itimerspec32 *uits);
extern int put_old_itimerspec32(const struct itimerspec64 *its,
struct old_itimerspec32 *uits);
struct __kernel_timex;
int get_old_timex32(struct __kernel_timex *, const struct old_timex32 *);
int put_old_timex32(struct old_timex32 *, const struct __kernel_timex *);







extern struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec);
# 61 "./include/linux/time.h" 2

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool itimerspec64_valid(const struct itimerspec64 *its)
{
if (!timespec64_valid(&(its->it_interval)) ||
!timespec64_valid(&(its->it_value)))
return false;

return true;
}
# 100 "./include/linux/time.h"
# 1 "./include/vdso/time.h" 1






struct timens_offset {
s64 sec;
u64 nsec;
};
# 101 "./include/linux/time.h" 2
# 25 "./include/linux/ktime.h" 2
# 1 "./include/linux/jiffies.h" 1
# 12 "./include/linux/jiffies.h"
# 1 "./include/vdso/jiffies.h" 1




# 1 "./arch/riscv/include/generated/uapi/asm/param.h" 1
# 6 "./include/vdso/jiffies.h" 2
# 13 "./include/linux/jiffies.h" 2
# 1 "./arch/riscv/include/generated/uapi/asm/param.h" 1
# 14 "./include/linux/jiffies.h" 2
# 1 "./include/generated/timeconst.h" 1
# 15 "./include/linux/jiffies.h" 2
# 62 "./include/linux/jiffies.h"
extern int register_refined_jiffies(long clock_tick_rate);
# 79 "./include/linux/jiffies.h"
extern u64 __attribute__((__aligned__((1 << 6)), __section__(".data..cacheline_aligned"))) jiffies_64;
extern unsigned long volatile __attribute__((__aligned__((1 << 6)), __section__(".data..cacheline_aligned"))) jiffies;




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 get_jiffies_64(void)
{
return (u64)jiffies;
}
# 189 "./include/linux/jiffies.h"
extern unsigned long preset_lpj;
# 290 "./include/linux/jiffies.h"
extern unsigned int jiffies_to_msecs(const unsigned long j);
extern unsigned int jiffies_to_usecs(const unsigned long j);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 jiffies_to_nsecs(const unsigned long j)
{
return (u64)jiffies_to_usecs(j) * 1000L;
}

extern u64 jiffies64_to_nsecs(u64 j);
extern u64 jiffies64_to_msecs(u64 j);

extern unsigned long __msecs_to_jiffies(const unsigned int m);






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long _msecs_to_jiffies(const unsigned int m)
{
return (m + (1000L / 100) - 1) / (1000L / 100);
}
# 363 "./include/linux/jiffies.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned long msecs_to_jiffies(const unsigned int m)
{
if (__builtin_constant_p(m)) {
if ((int)m < 0)
return ((((long)(~0UL >> 1)) >> 1)-1);
return _msecs_to_jiffies(m);
} else {
return __msecs_to_jiffies(m);
}
}
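/*
 * [added annotation, not in the original .i] The folded literal
 * (1000L / 100) shows this configuration was built with CONFIG_HZ=100,
 * and ((((long)(~0UL >> 1)) >> 1)-1) is the expanded MAX_JIFFY_OFFSET
 * overflow clamp.
 */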

extern unsigned long __usecs_to_jiffies(const unsigned int u);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long _usecs_to_jiffies(const unsigned int u)
{
return (u + (1000000L / 100) - 1) / (1000000L / 100);
}
# 410 "./include/linux/jiffies.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned long usecs_to_jiffies(const unsigned int u)
{
if (__builtin_constant_p(u)) {
if (u > jiffies_to_usecs(((((long)(~0UL >> 1)) >> 1)-1)))
return ((((long)(~0UL >> 1)) >> 1)-1);
return _usecs_to_jiffies(u);
} else {
return __usecs_to_jiffies(u);
}
}

extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
extern void jiffies_to_timespec64(const unsigned long jiffies,
struct timespec64 *value);
extern clock_t jiffies_to_clock_t(unsigned long x);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) clock_t jiffies_delta_to_clock_t(long delta)
{
return jiffies_to_clock_t(__builtin_choose_expr(((!!(sizeof((typeof(0L) *)1 == (typeof(delta) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(0L) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(delta) * 0l)) : (int *)8))))), ((0L) > (delta) ? (0L) : (delta)), ({ typeof(0L) __UNIQUE_ID___x67 = (0L); typeof(delta) __UNIQUE_ID___y68 = (delta); ((__UNIQUE_ID___x67) > (__UNIQUE_ID___y68) ? (__UNIQUE_ID___x67) : (__UNIQUE_ID___y68)); })));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int jiffies_delta_to_msecs(long delta)
{
return jiffies_to_msecs(__builtin_choose_expr(((!!(sizeof((typeof(0L) *)1 == (typeof(delta) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(0L) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(delta) * 0l)) : (int *)8))))), ((0L) > (delta) ? (0L) : (delta)), ({ typeof(0L) __UNIQUE_ID___x69 = (0L); typeof(delta) __UNIQUE_ID___y70 = (delta); ((__UNIQUE_ID___x69) > (__UNIQUE_ID___y70) ? (__UNIQUE_ID___x69) : (__UNIQUE_ID___y70)); })));
}

extern unsigned long clock_t_to_jiffies(unsigned long x);
extern u64 jiffies_64_to_clock_t(u64 x);
extern u64 nsec_to_clock_t(u64 x);
extern u64 nsecs_to_jiffies64(u64 n);
extern unsigned long nsecs_to_jiffies(u64 n);
# 26 "./include/linux/ktime.h" 2



typedef s64 ktime_t;
# 38 "./include/linux/ktime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
{
if (__builtin_expect(!!(secs >= (((s64)~((u64)1 << 63)) / 1000000000L)), 0))
return ((s64)~((u64)1 << 63));

return secs * 1000000000L + (s64)nsecs;
}
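/*
 * [added annotation, not in the original .i] ((s64)~((u64)1 << 63)) is
 * KTIME_MAX; ktime_set() saturates to it rather than letting the
 * secs * NSEC_PER_SEC multiplication overflow.
 */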
# 71 "./include/linux/ktime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t timespec64_to_ktime(struct timespec64 ts)
{
return ktime_set(ts.tv_sec, ts.tv_nsec);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s64 ktime_to_ns(const ktime_t kt)
{
return kt;
}
# 95 "./include/linux/ktime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
{
if (cmp1 < cmp2)
return -1;
if (cmp1 > cmp2)
return 1;
return 0;
}
# 111 "./include/linux/ktime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ktime_after(const ktime_t cmp1, const ktime_t cmp2)
{
return ktime_compare(cmp1, cmp2) > 0;
}
# 123 "./include/linux/ktime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
{
return ktime_compare(cmp1, cmp2) < 0;
}
# 148 "./include/linux/ktime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s64 ktime_divns(const ktime_t kt, s64 div)
{




({ int __ret_warn_on = !!(div < 0); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/ktime.h"), "i" (154), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
return kt / div;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s64 ktime_to_us(const ktime_t kt)
{
return ktime_divns(kt, 1000L);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s64 ktime_to_ms(const ktime_t kt)
{
return ktime_divns(kt, 1000000L);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
{
return ktime_to_us(((later) - (earlier)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier)
{
return ktime_to_ms(((later) - (earlier)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
{
return ((kt) + (usec * 1000L));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t ktime_add_ms(const ktime_t kt, const u64 msec)
{
return ((kt) + (msec * 1000000L));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
{
return ((kt) - (usec * 1000L));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec)
{
return ((kt) - (msec * 1000000L));
}

extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
# 209 "./include/linux/ktime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__)) bool ktime_to_timespec64_cond(const ktime_t kt,
struct timespec64 *ts)
{
if (kt) {
*ts = ns_to_timespec64((kt));
return true;
} else {
return false;
}
}


# 1 "./include/vdso/ktime.h" 1
# 221 "./include/linux/ktime.h" 2

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t ns_to_ktime(u64 ns)
{
return ns;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t ms_to_ktime(u64 ms)
{
return ms * 1000000L;
}


# 1 "./include/linux/timekeeping.h" 1





# 1 "./include/linux/clocksource_ids.h" 1





enum clocksource_ids {
CSID_GENERIC = 0,
CSID_ARM_ARCH_COUNTER,
CSID_MAX,
};
# 7 "./include/linux/timekeeping.h" 2



void timekeeping_init(void);
extern int timekeeping_suspended;


extern void legacy_timer_tick(unsigned long ticks);




extern int do_settimeofday64(const struct timespec64 *ts);
extern int do_sys_settimeofday64(const struct timespec64 *tv,
const struct timezone *tz);
# 41 "./include/linux/timekeeping.h"
extern void ktime_get_raw_ts64(struct timespec64 *ts);
extern void ktime_get_ts64(struct timespec64 *ts);
extern void ktime_get_real_ts64(struct timespec64 *tv);
extern void ktime_get_coarse_ts64(struct timespec64 *ts);
extern void ktime_get_coarse_real_ts64(struct timespec64 *ts);

void getboottime64(struct timespec64 *ts);




extern time64_t ktime_get_seconds(void);
extern time64_t __ktime_get_real_seconds(void);
extern time64_t ktime_get_real_seconds(void);





enum tk_offsets {
TK_OFFS_REAL,
TK_OFFS_BOOT,
TK_OFFS_TAI,
TK_OFFS_MAX,
};

extern ktime_t ktime_get(void);
extern ktime_t ktime_get_with_offset(enum tk_offsets offs);
extern ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs);
extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
extern ktime_t ktime_get_raw(void);
extern u32 ktime_get_resolution_ns(void);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t ktime_get_real(void)
{
return ktime_get_with_offset(TK_OFFS_REAL);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t ktime_get_coarse_real(void)
{
return ktime_get_coarse_with_offset(TK_OFFS_REAL);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t ktime_get_boottime(void)
{
return ktime_get_with_offset(TK_OFFS_BOOT);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t ktime_get_coarse_boottime(void)
{
return ktime_get_coarse_with_offset(TK_OFFS_BOOT);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t ktime_get_clocktai(void)
{
return ktime_get_with_offset(TK_OFFS_TAI);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t ktime_get_coarse_clocktai(void)
{
return ktime_get_coarse_with_offset(TK_OFFS_TAI);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t ktime_get_coarse(void)
{
struct timespec64 ts;

ktime_get_coarse_ts64(&ts);
return timespec64_to_ktime(ts);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 ktime_get_coarse_ns(void)
{
return ktime_to_ns(ktime_get_coarse());
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 ktime_get_coarse_real_ns(void)
{
return ktime_to_ns(ktime_get_coarse_real());
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 ktime_get_coarse_boottime_ns(void)
{
return ktime_to_ns(ktime_get_coarse_boottime());
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 ktime_get_coarse_clocktai_ns(void)
{
return ktime_to_ns(ktime_get_coarse_clocktai());
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t ktime_mono_to_real(ktime_t mono)
{
return ktime_mono_to_any(mono, TK_OFFS_REAL);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 ktime_get_ns(void)
{
return ktime_to_ns(ktime_get());
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 ktime_get_real_ns(void)
{
return ktime_to_ns(ktime_get_real());
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 ktime_get_boottime_ns(void)
{
return ktime_to_ns(ktime_get_boottime());
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 ktime_get_clocktai_ns(void)
{
return ktime_to_ns(ktime_get_clocktai());
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 ktime_get_raw_ns(void)
{
return ktime_to_ns(ktime_get_raw());
}

extern u64 ktime_get_mono_fast_ns(void);
extern u64 ktime_get_raw_fast_ns(void);
extern u64 ktime_get_boot_fast_ns(void);
extern u64 ktime_get_real_fast_ns(void);






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ktime_get_boottime_ts64(struct timespec64 *ts)
{
*ts = ns_to_timespec64((ktime_get_boottime()));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ktime_get_coarse_boottime_ts64(struct timespec64 *ts)
{
*ts = ns_to_timespec64((ktime_get_coarse_boottime()));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) time64_t ktime_get_boottime_seconds(void)
{
return ktime_divns(ktime_get_coarse_boottime(), 1000000000L);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ktime_get_clocktai_ts64(struct timespec64 *ts)
{
*ts = ns_to_timespec64((ktime_get_clocktai()));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ktime_get_coarse_clocktai_ts64(struct timespec64 *ts)
{
*ts = ns_to_timespec64((ktime_get_coarse_clocktai()));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) time64_t ktime_get_clocktai_seconds(void)
{
return ktime_divns(ktime_get_coarse_clocktai(), 1000000000L);
}




extern bool timekeeping_rtc_skipsuspend(void);
extern bool timekeeping_rtc_skipresume(void);

extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);







struct ktime_timestamps {
u64 mono;
u64 boot;
u64 real;
};
# 246 "./include/linux/timekeeping.h"
struct system_time_snapshot {
u64 cycles;
ktime_t real;
ktime_t raw;
enum clocksource_ids cs_id;
unsigned int clock_was_set_seq;
u8 cs_was_changed_seq;
};
# 262 "./include/linux/timekeeping.h"
struct system_device_crosststamp {
ktime_t device;
ktime_t sys_realtime;
ktime_t sys_monoraw;
};
# 275 "./include/linux/timekeeping.h"
struct system_counterval_t {
u64 cycles;
struct clocksource *cs;
};




extern int get_device_system_crosststamp(
int (*get_time_fn)(ktime_t *device_time,
struct system_counterval_t *system_counterval,
void *ctx),
void *ctx,
struct system_time_snapshot *history,
struct system_device_crosststamp *xtstamp);




extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);


extern void ktime_get_fast_timestamps(struct ktime_timestamps *snap);




extern int persistent_clock_is_local;

extern void read_persistent_clock64(struct timespec64 *ts);
void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
struct timespec64 *boot_offset);
# 233 "./include/linux/ktime.h" 2
# 7 "./include/linux/timer.h" 2

# 1 "./include/linux/debugobjects.h" 1







enum debug_obj_state {
ODEBUG_STATE_NONE,
ODEBUG_STATE_INIT,
ODEBUG_STATE_INACTIVE,
ODEBUG_STATE_ACTIVE,
ODEBUG_STATE_DESTROYED,
ODEBUG_STATE_NOTAVAILABLE,
ODEBUG_STATE_MAX,
};

struct debug_obj_descr;
# 28 "./include/linux/debugobjects.h"
struct debug_obj {
struct hlist_node node;
enum debug_obj_state state;
unsigned int astate;
void *object;
const struct debug_obj_descr *descr;
};
# 55 "./include/linux/debugobjects.h"
struct debug_obj_descr {
const char *name;
void *(*debug_hint)(void *addr);
bool (*is_static_object)(void *addr);
bool (*fixup_init)(void *addr, enum debug_obj_state state);
bool (*fixup_activate)(void *addr, enum debug_obj_state state);
bool (*fixup_destroy)(void *addr, enum debug_obj_state state);
bool (*fixup_free)(void *addr, enum debug_obj_state state);
bool (*fixup_assert_init)(void *addr, enum debug_obj_state state);
};
# 88 "./include/linux/debugobjects.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
debug_object_init (void *addr, const struct debug_obj_descr *descr) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
debug_object_activate (void *addr, const struct debug_obj_descr *descr) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
debug_object_deactivate(void *addr, const struct debug_obj_descr *descr) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
debug_object_destroy (void *addr, const struct debug_obj_descr *descr) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
debug_object_free (void *addr, const struct debug_obj_descr *descr) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
debug_object_assert_init(void *addr, const struct debug_obj_descr *descr) { }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void debug_objects_early_init(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void debug_objects_mem_init(void) { }





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
debug_check_no_obj_freed(const void *address, unsigned long size) { }
# 9 "./include/linux/timer.h" 2


struct timer_list {




struct hlist_node entry;
unsigned long expires;
void (*function)(struct timer_list *);
u32 flags;


struct lockdep_map lockdep_map;

};
# 91 "./include/linux/timer.h"
void init_timer_key(struct timer_list *timer,
void (*func)(struct timer_list *), unsigned int flags,
const char *name, struct lock_class_key *key);







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void init_timer_on_stack_key(struct timer_list *timer,
void (*func)(struct timer_list *),
unsigned int flags,
const char *name,
struct lock_class_key *key)
{
init_timer_key(timer, func, flags, name, key);
}
# 150 "./include/linux/timer.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void destroy_timer_on_stack(struct timer_list *timer) { }
# 166 "./include/linux/timer.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int timer_pending(const struct timer_list * timer)
{
return !hlist_unhashed_lockless(&timer->entry);
}

extern void add_timer_on(struct timer_list *timer, int cpu);
extern int del_timer(struct timer_list * timer);
extern int mod_timer(struct timer_list *timer, unsigned long expires);
extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
extern int timer_reduce(struct timer_list *timer, unsigned long expires);







extern void add_timer(struct timer_list *timer);

extern int try_to_del_timer_sync(struct timer_list *timer);


extern int del_timer_sync(struct timer_list *timer);






extern void init_timers(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);


struct ctl_table;

extern unsigned int sysctl_timer_migration;
int timer_migration_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);


unsigned long __round_jiffies(unsigned long j, int cpu);
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
unsigned long round_jiffies_relative(unsigned long j);

unsigned long __round_jiffies_up(unsigned long j, int cpu);
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
unsigned long round_jiffies_up(unsigned long j);
unsigned long round_jiffies_up_relative(unsigned long j);


int timers_prepare_cpu(unsigned int cpu);
int timers_dead_cpu(unsigned int cpu);
# 10 "./include/linux/workqueue.h" 2








struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);







enum {
WORK_STRUCT_PENDING_BIT = 0,
WORK_STRUCT_INACTIVE_BIT= 1,
WORK_STRUCT_PWQ_BIT = 2,
WORK_STRUCT_LINKED_BIT = 3,




WORK_STRUCT_COLOR_SHIFT = 4,


WORK_STRUCT_COLOR_BITS = 4,

WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
WORK_STRUCT_INACTIVE = 1 << WORK_STRUCT_INACTIVE_BIT,
WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT,
WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,



WORK_STRUCT_STATIC = 0,


WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS),


WORK_CPU_UNBOUND = 32,






WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
WORK_STRUCT_COLOR_BITS,


WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT,

__WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE,
WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING),






WORK_OFFQ_FLAG_BITS = 1,
WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
WORK_OFFQ_LEFT = 64 - WORK_OFFQ_POOL_SHIFT,
WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
WORK_OFFQ_POOL_NONE = (1LU << WORK_OFFQ_POOL_BITS) - 1,


WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
WORK_STRUCT_NO_POOL = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,


WORK_BUSY_PENDING = 1 << 0,
WORK_BUSY_RUNNING = 1 << 1,


WORKER_DESC_LEN = 24,
};
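/*
 * [added annotation, not in the original .i] WORK_CPU_UNBOUND is defined
 * as NR_CPUS, so the 32 above also records that this kernel was
 * configured with CONFIG_NR_CPUS=32.
 */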

struct work_struct {
atomic_long_t data;
struct list_head entry;
work_func_t func;

struct lockdep_map lockdep_map;

};





struct delayed_work {
struct work_struct work;
struct timer_list timer;


struct workqueue_struct *wq;
int cpu;
};

struct rcu_work {
struct work_struct work;
struct callback_head rcu;


struct workqueue_struct *wq;
};






struct workqueue_attrs {



int nice;




cpumask_var_t cpumask;
# 150 "./include/linux/workqueue.h"
bool no_numa;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct delayed_work *to_delayed_work(struct work_struct *work)
{
return ({ void *__mptr = (void *)(work); _Static_assert(__builtin_types_compatible_p(typeof(*(work)), typeof(((struct delayed_work *)0)->work)) || __builtin_types_compatible_p(typeof(*(work)), typeof(void)), "pointer type mismatch in container_of()"); ((struct delayed_work *)(__mptr - __builtin_offsetof(struct delayed_work, work))); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct rcu_work *to_rcu_work(struct work_struct *work)
{
return ({ void *__mptr = (void *)(work); _Static_assert(__builtin_types_compatible_p(typeof(*(work)), typeof(((struct rcu_work *)0)->work)) || __builtin_types_compatible_p(typeof(*(work)), typeof(void)), "pointer type mismatch in container_of()"); ((struct rcu_work *)(__mptr - __builtin_offsetof(struct rcu_work, work))); });
}

struct execute_work {
struct work_struct work;
};
# 210 "./include/linux/workqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __init_work(struct work_struct *work, int onstack) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void destroy_work_on_stack(struct work_struct *work) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int work_static(struct work_struct *work) { return 0; }
# 303 "./include/linux/workqueue.h"
enum {
WQ_UNBOUND = 1 << 1,
WQ_FREEZABLE = 1 << 2,
WQ_MEM_RECLAIM = 1 << 3,
WQ_HIGHPRI = 1 << 4,
WQ_CPU_INTENSIVE = 1 << 5,
WQ_SYSFS = 1 << 6,
# 336 "./include/linux/workqueue.h"
WQ_POWER_EFFICIENT = 1 << 7,

__WQ_DRAINING = 1 << 16,
__WQ_ORDERED = 1 << 17,
__WQ_LEGACY = 1 << 18,
__WQ_ORDERED_EXPLICIT = 1 << 19,

WQ_MAX_ACTIVE = 512,
WQ_MAX_UNBOUND_PER_CPU = 4,
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
};
# 380 "./include/linux/workqueue.h"
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
# 402 "./include/linux/workqueue.h"
__attribute__((__format__(printf, 1, 4))) struct workqueue_struct *
alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
# 430 "./include/linux/workqueue.h"
extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
int max_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __attribute__((__format__(printf, 1, 2))) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_all_workqueues(void);
extern void show_one_workqueue(struct workqueue_struct *wq);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
# 499 "./include/linux/workqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool queue_work(struct workqueue_struct *wq,
struct work_struct *work)
{
return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}
# 513 "./include/linux/workqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork,
unsigned long delay)
{
return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
# 528 "./include/linux/workqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mod_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork,
unsigned long delay)
{
return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
# 542 "./include/linux/workqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool schedule_work_on(int cpu, struct work_struct *work)
{
return queue_work_on(cpu, system_wq, work);
}
# 561 "./include/linux/workqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool schedule_work(struct work_struct *work)
{
return queue_work(system_wq, work);
}
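/*
 * Illustrative use of the helpers above (names hypothetical, not from
 * this translation unit):
 *
 *   static void my_handler(struct work_struct *work) { ... }
 *   static DECLARE_WORK(my_work, my_handler);
 *   ...
 *   schedule_work(&my_work);      // queue on system_wq, any CPU
 *   ...
 *   cancel_work_sync(&my_work);   // cancel, waiting out a running instance
 */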
# 590 "./include/linux/workqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flush_scheduled_work(void)
{
flush_workqueue(system_wq);
}
# 604 "./include/linux/workqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
unsigned long delay)
{
return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}
# 618 "./include/linux/workqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool schedule_delayed_work(struct delayed_work *dwork,
unsigned long delay)
{
return queue_delayed_work(system_wq, dwork, delay);
}
# 634 "./include/linux/workqueue.h"
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
# 645 "./include/linux/workqueue.h"
int workqueue_sysfs_register(struct workqueue_struct *wq);






void wq_watchdog_touch(int cpu);





int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);


void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) workqueue_init_early(void);
void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) workqueue_init(void);
# 19 "./include/linux/mm_types.h" 2
# 1 "./include/linux/seqlock.h" 1
# 19 "./include/linux/seqlock.h"
# 1 "./include/linux/mutex.h" 1
# 63 "./include/linux/mutex.h"
struct mutex {
atomic_long_t owner;
raw_spinlock_t wait_lock;

struct optimistic_spin_queue osq;

struct list_head wait_list;

void *magic;


struct lockdep_map dep_map;

};
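/*
 * "owner" packs the owning task_struct pointer with flag bits in its low
 * bits (task_structs are aligned, so those bits are free). The osq, magic
 * and dep_map members are present because this configuration was
 * preprocessed with optimistic spinning, CONFIG_DEBUG_MUTEXES and
 * CONFIG_DEBUG_LOCK_ALLOC enabled, respectively; the #ifdefs that guard
 * them are already resolved in a .i file.
 */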






extern void mutex_destroy(struct mutex *lock);
# 118 "./include/linux/mutex.h"
extern void __mutex_init(struct mutex *lock, const char *name,
struct lock_class_key *key);







extern bool mutex_is_locked(struct mutex *lock);
# 178 "./include/linux/mutex.h"
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);

extern int __attribute__((__warn_unused_result__)) mutex_lock_interruptible_nested(struct mutex *lock,
unsigned int subclass);
extern int __attribute__((__warn_unused_result__)) mutex_lock_killable_nested(struct mutex *lock,
unsigned int subclass);
extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
# 217 "./include/linux/mutex.h"
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);

extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
# 20 "./include/linux/seqlock.h" 2
# 1 "./include/linux/ww_mutex.h" 1
# 21 "./include/linux/ww_mutex.h"
# 1 "./include/linux/rtmutex.h" 1
# 21 "./include/linux/rtmutex.h"
extern int max_lock_depth;

struct rt_mutex_base {
raw_spinlock_t wait_lock;
struct rb_root_cached waiters;
struct task_struct *owner;
};
# 42 "./include/linux/rtmutex.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool rt_mutex_base_is_locked(struct rt_mutex_base *lock)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_71(void) ; if (!((sizeof(lock->owner) == sizeof(char) || sizeof(lock->owner) == sizeof(short) || sizeof(lock->owner) == sizeof(int) || sizeof(lock->owner) == sizeof(long)) || sizeof(lock->owner) == sizeof(long long))) __compiletime_assert_71(); } while (0); (*(const volatile typeof( _Generic((lock->owner), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (lock->owner))) *)&(lock->owner)); }) != ((void *)0);
}
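/*
 * Decoded, the body above is simply:
 *
 *   return READ_ONCE(lock->owner) != NULL;
 *
 * The __compiletime_assert arm rejects field sizes that cannot be loaded
 * atomically, and the _Generic picks a matching scalar type for the
 * volatile read.
 */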

extern void rt_mutex_base_init(struct rt_mutex_base *rtb);
# 57 "./include/linux/rtmutex.h"
struct rt_mutex {
struct rt_mutex_base rtmutex;

struct lockdep_map dep_map;

};

struct rt_mutex_waiter;
struct hrtimer_sleeper;


extern void rt_mutex_debug_task_free(struct task_struct *tsk);
# 98 "./include/linux/rtmutex.h"
extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);


extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock);
# 116 "./include/linux/rtmutex.h"
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
extern int rt_mutex_lock_killable(struct rt_mutex *lock);
extern int rt_mutex_trylock(struct rt_mutex *lock);

extern void rt_mutex_unlock(struct rt_mutex *lock);
# 22 "./include/linux/ww_mutex.h" 2
# 38 "./include/linux/ww_mutex.h"
struct ww_class {
atomic_long_t stamp;
struct lock_class_key acquire_key;
struct lock_class_key mutex_key;
const char *acquire_name;
const char *mutex_name;
unsigned int is_wait_die;
};

struct ww_mutex {
struct mutex base;
struct ww_acquire_ctx *ctx;

struct ww_class *ww_class;

};

struct ww_acquire_ctx {
struct task_struct *task;
unsigned long stamp;
unsigned int acquired;
unsigned short wounded;
unsigned short is_wait_die;

unsigned int done_acquire;
struct ww_class *ww_class;
void *contending_lock;


struct lockdep_map dep_map;





};
# 98 "./include/linux/ww_mutex.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ww_mutex_init(struct ww_mutex *lock,
struct ww_class *ww_class)
{
__mutex_init(&lock->base,ww_class->mutex_name,&ww_class->mutex_key);
lock->ctx = ((void *)0);

lock->ww_class = ww_class;

}
# 132 "./include/linux/ww_mutex.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ww_acquire_init(struct ww_acquire_ctx *ctx,
struct ww_class *ww_class)
{
ctx->task = get_current();
ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
ctx->acquired = 0;
ctx->wounded = false;
ctx->is_wait_die = ww_class->is_wait_die;

ctx->ww_class = ww_class;
ctx->done_acquire = 0;
ctx->contending_lock = ((void *)0);


debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
&ww_class->acquire_key, 0);
lock_acquire(&ctx->dep_map, 0, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));





}
# 168 "./include/linux/ww_mutex.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ww_acquire_done(struct ww_acquire_ctx *ctx)
{

do { ({ int __ret_warn_on = !!(debug_locks && !(lock_is_held(&(ctx)->dep_map) != 0)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/ww_mutex.h"), "i" (171), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } while (0);

({ int __ret = 0; if (!oops_in_progress && __builtin_expect(!!(ctx->done_acquire), 0)) { do { } while(0); if (debug_locks_off() && !debug_locks_silent) ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("DEBUG_LOCKS_WARN_ON(%s)", "ctx->done_acquire"); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/ww_mutex.h"), "i" (173), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { } while(0); __ret = 1; } __ret; });
ctx->done_acquire = 1;

}
# 185 "./include/linux/ww_mutex.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{

lock_release(&ctx->dep_map, ({ __label__ __here; __here: (unsigned long)&&__here; }));


({ int __ret = 0; if (!oops_in_progress && __builtin_expect(!!(ctx->acquired), 0)) { do { } while(0); if (debug_locks_off() && !debug_locks_silent) ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("DEBUG_LOCKS_WARN_ON(%s)", "ctx->acquired"); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/ww_mutex.h"), "i" (191), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { } while(0); __ret = 1; } __ret; });
if (!0)




ctx->done_acquire = 1;

if (!1)

ctx->acquired = ~0U;

}
# 234 "./include/linux/ww_mutex.h"
extern int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx);
# 266 "./include/linux/ww_mutex.h"
extern int __attribute__((__warn_unused_result__)) ww_mutex_lock_interruptible(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx);
# 292 "./include/linux/ww_mutex.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
int ret;

({ int __ret = 0; if (!oops_in_progress && __builtin_expect(!!(!ctx->contending_lock), 0)) { do { } while(0); if (debug_locks_off() && !debug_locks_silent) ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("DEBUG_LOCKS_WARN_ON(%s)", "!ctx->contending_lock"); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/ww_mutex.h"), "i" (297), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { } while(0); __ret = 1; } __ret; });

ret = ww_mutex_lock(lock, ctx);
(void)ret;
}
# 328 "./include/linux/ww_mutex.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__))
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
{

({ int __ret = 0; if (!oops_in_progress && __builtin_expect(!!(!ctx->contending_lock), 0)) { do { } while(0); if (debug_locks_off() && !debug_locks_silent) ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("DEBUG_LOCKS_WARN_ON(%s)", "!ctx->contending_lock"); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/ww_mutex.h"), "i" (333), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { } while(0); __ret = 1; } __ret; });

return ww_mutex_lock_interruptible(lock, ctx);
}

extern void ww_mutex_unlock(struct ww_mutex *lock);

extern int __attribute__((__warn_unused_result__)) ww_mutex_trylock(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx);
# 351 "./include/linux/ww_mutex.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ww_mutex_destroy(struct ww_mutex *lock)
{

mutex_destroy(&lock->base);

}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ww_mutex_is_locked(struct ww_mutex *lock)
{
return mutex_is_locked((&lock->base));
}
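/*
 * Wound/wait acquire protocol sketch for the API above (illustrative
 * names; further -EDEADLK handling elided):
 *
 *   struct ww_acquire_ctx ctx;
 *   int ret;
 *
 *   ww_acquire_init(&ctx, &my_ww_class);
 *   ret = ww_mutex_lock(&a->lock, &ctx);
 *   ret = ww_mutex_lock(&b->lock, &ctx);
 *   if (ret == -EDEADLK) {
 *           ww_mutex_unlock(&a->lock);             // back off: drop held locks
 *           ww_mutex_lock_slow(&b->lock, &ctx);    // sleep on the contended one
 *           ww_mutex_lock(&a->lock, &ctx);         // then reacquire the rest
 *   }
 *   ww_acquire_done(&ctx);    // acquire phase over; no -EDEADLK after this
 *   ...
 *   ww_mutex_unlock(&b->lock);
 *   ww_mutex_unlock(&a->lock);
 *   ww_acquire_fini(&ctx);
 */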
# 21 "./include/linux/seqlock.h" 2
# 65 "./include/linux/seqlock.h"
typedef struct seqcount {
unsigned sequence;

struct lockdep_map dep_map;

} seqcount_t;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __seqcount_init(seqcount_t *s, const char *name,
struct lock_class_key *key)
{



lockdep_init_map(&s->dep_map, name, key, 0);
s->sequence = 0;
}
# 97 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void seqcount_lockdep_reader_access(const seqcount_t *s)
{
seqcount_t *l = (seqcount_t *)s;
unsigned long flags;

do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); if (!({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) trace_hardirqs_off(); } while (0);
lock_acquire(&l->dep_map, 0, 0, 2, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
lock_release(&l->dep_map, (unsigned long)__builtin_return_address(0));
do { if (!({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(flags); } while (0); } while (0);
}
# 254 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) seqcount_t *__seqprop_ptr(seqcount_t *s)
{
return s;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned __seqprop_sequence(const seqcount_t *s)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_72(void) ; if (!((sizeof(s->sequence) == sizeof(char) || sizeof(s->sequence) == sizeof(short) || sizeof(s->sequence) == sizeof(int) || sizeof(s->sequence) == sizeof(long)) || sizeof(s->sequence) == sizeof(long long))) __compiletime_assert_72(); } while (0); (*(const volatile typeof( _Generic((s->sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->sequence))) *)&(s->sequence)); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __seqprop_preemptible(const seqcount_t *s)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __seqprop_assert(const seqcount_t *s)
{
do { } while (0);
}



typedef struct seqcount_raw_spinlock { seqcount_t seqcount; raw_spinlock_t *lock; } seqcount_raw_spinlock_t; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) seqcount_t * __seqprop_raw_spinlock_ptr(seqcount_raw_spinlock_t *s) { return &s->seqcount; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned __seqprop_raw_spinlock_sequence(const seqcount_raw_spinlock_t *s) { unsigned seq = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_73(void) ; if (!((sizeof(s->seqcount.sequence) == sizeof(char) || sizeof(s->seqcount.sequence) == sizeof(short) || sizeof(s->seqcount.sequence) == sizeof(int) || sizeof(s->seqcount.sequence) == sizeof(long)) || sizeof(s->seqcount.sequence) == sizeof(long long))) __compiletime_assert_73(); } while (0); (*(const volatile typeof( _Generic((s->seqcount.sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->seqcount.sequence))) *)&(s->seqcount.sequence)); }); if (!0) return seq; if (false && __builtin_expect(!!(seq & 1), 0)) { _raw_spin_lock(s->lock); _raw_spin_unlock(s->lock); seq = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_74(void) ; if (!((sizeof(s->seqcount.sequence) == sizeof(char) || sizeof(s->seqcount.sequence) == sizeof(short) || sizeof(s->seqcount.sequence) == sizeof(int) || sizeof(s->seqcount.sequence) == sizeof(long)) || sizeof(s->seqcount.sequence) == sizeof(long long))) __compiletime_assert_74(); } while (0); (*(const volatile typeof( _Generic((s->seqcount.sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->seqcount.sequence))) *)&(s->seqcount.sequence)); }); } return seq; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool __seqprop_raw_spinlock_preemptible(const seqcount_raw_spinlock_t *s) { if (!0) return false; return false; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __seqprop_raw_spinlock_assert(const seqcount_raw_spinlock_t *s) { do { ({ int __ret_warn_on = !!(debug_locks && !(lock_is_held(&(s->lock)->dep_map) != 0)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/seqlock.h"), "i" (276), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } while (0); }
typedef struct seqcount_spinlock { seqcount_t seqcount; spinlock_t *lock; } seqcount_spinlock_t; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) seqcount_t * __seqprop_spinlock_ptr(seqcount_spinlock_t *s) { return &s->seqcount; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned __seqprop_spinlock_sequence(const seqcount_spinlock_t *s) { unsigned seq = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_75(void) ; if (!((sizeof(s->seqcount.sequence) == sizeof(char) || sizeof(s->seqcount.sequence) == sizeof(short) || sizeof(s->seqcount.sequence) == sizeof(int) || sizeof(s->seqcount.sequence) == sizeof(long)) || sizeof(s->seqcount.sequence) == sizeof(long long))) __compiletime_assert_75(); } while (0); (*(const volatile typeof( _Generic((s->seqcount.sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->seqcount.sequence))) *)&(s->seqcount.sequence)); }); if (!0) return seq; if (0 && __builtin_expect(!!(seq & 1), 0)) { spin_lock(s->lock); spin_unlock(s->lock); seq = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_76(void) ; if (!((sizeof(s->seqcount.sequence) == sizeof(char) || sizeof(s->seqcount.sequence) == sizeof(short) || sizeof(s->seqcount.sequence) == sizeof(int) || sizeof(s->seqcount.sequence) == sizeof(long)) || sizeof(s->seqcount.sequence) == sizeof(long long))) __compiletime_assert_76(); } while (0); (*(const volatile typeof( _Generic((s->seqcount.sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->seqcount.sequence))) *)&(s->seqcount.sequence)); }); } return seq; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool __seqprop_spinlock_preemptible(const seqcount_spinlock_t *s) { if (!0) return 0; return false; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __seqprop_spinlock_assert(const seqcount_spinlock_t *s) { do { ({ int __ret_warn_on = !!(debug_locks && !(lock_is_held(&(s->lock)->dep_map) != 0)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/seqlock.h"), "i" (277), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } while (0); }
typedef struct seqcount_rwlock { seqcount_t seqcount; rwlock_t *lock; } seqcount_rwlock_t; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) seqcount_t * __seqprop_rwlock_ptr(seqcount_rwlock_t *s) { return &s->seqcount; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned __seqprop_rwlock_sequence(const seqcount_rwlock_t *s) { unsigned seq = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_77(void) ; if (!((sizeof(s->seqcount.sequence) == sizeof(char) || sizeof(s->seqcount.sequence) == sizeof(short) || sizeof(s->seqcount.sequence) == sizeof(int) || sizeof(s->seqcount.sequence) == sizeof(long)) || sizeof(s->seqcount.sequence) == sizeof(long long))) __compiletime_assert_77(); } while (0); (*(const volatile typeof( _Generic((s->seqcount.sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->seqcount.sequence))) *)&(s->seqcount.sequence)); }); if (!0) return seq; if (0 && __builtin_expect(!!(seq & 1), 0)) { _raw_read_lock(s->lock); _raw_read_unlock(s->lock); seq = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_78(void) ; if (!((sizeof(s->seqcount.sequence) == sizeof(char) || sizeof(s->seqcount.sequence) == sizeof(short) || sizeof(s->seqcount.sequence) == sizeof(int) || sizeof(s->seqcount.sequence) == sizeof(long)) || sizeof(s->seqcount.sequence) == sizeof(long long))) __compiletime_assert_78(); } while (0); (*(const volatile typeof( _Generic((s->seqcount.sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->seqcount.sequence))) *)&(s->seqcount.sequence)); }); } return seq; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool __seqprop_rwlock_preemptible(const seqcount_rwlock_t *s) { if (!0) return 0; return false; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __seqprop_rwlock_assert(const seqcount_rwlock_t *s) { do { ({ int __ret_warn_on = !!(debug_locks && !(lock_is_held(&(s->lock)->dep_map) != 0)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/seqlock.h"), "i" (278), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } while (0); }
typedef struct seqcount_mutex { seqcount_t seqcount; struct mutex *lock; } seqcount_mutex_t; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) seqcount_t * __seqprop_mutex_ptr(seqcount_mutex_t *s) { return &s->seqcount; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned __seqprop_mutex_sequence(const seqcount_mutex_t *s) { unsigned seq = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_79(void) ; if (!((sizeof(s->seqcount.sequence) == sizeof(char) || sizeof(s->seqcount.sequence) == sizeof(short) || sizeof(s->seqcount.sequence) == sizeof(int) || sizeof(s->seqcount.sequence) == sizeof(long)) || sizeof(s->seqcount.sequence) == sizeof(long long))) __compiletime_assert_79(); } while (0); (*(const volatile typeof( _Generic((s->seqcount.sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->seqcount.sequence))) *)&(s->seqcount.sequence)); }); if (!0) return seq; if (true && __builtin_expect(!!(seq & 1), 0)) { mutex_lock_nested(s->lock, 0); mutex_unlock(s->lock); seq = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_80(void) ; if (!((sizeof(s->seqcount.sequence) == sizeof(char) || sizeof(s->seqcount.sequence) == sizeof(short) || sizeof(s->seqcount.sequence) == sizeof(int) || sizeof(s->seqcount.sequence) == sizeof(long)) || sizeof(s->seqcount.sequence) == sizeof(long long))) __compiletime_assert_80(); } while (0); (*(const volatile typeof( _Generic((s->seqcount.sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->seqcount.sequence))) *)&(s->seqcount.sequence)); }); } return seq; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool __seqprop_mutex_preemptible(const seqcount_mutex_t *s) { if (!0) return true; return false; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __seqprop_mutex_assert(const seqcount_mutex_t *s) { do { ({ int __ret_warn_on = !!(debug_locks && !(lock_is_held(&(s->lock)->dep_map) != 0)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/seqlock.h"), "i" (279), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } while (0); }
typedef struct seqcount_ww_mutex { seqcount_t seqcount; struct ww_mutex *lock; } seqcount_ww_mutex_t; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) seqcount_t * __seqprop_ww_mutex_ptr(seqcount_ww_mutex_t *s) { return &s->seqcount; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned __seqprop_ww_mutex_sequence(const seqcount_ww_mutex_t *s) { unsigned seq = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_81(void) ; if (!((sizeof(s->seqcount.sequence) == sizeof(char) || sizeof(s->seqcount.sequence) == sizeof(short) || sizeof(s->seqcount.sequence) == sizeof(int) || sizeof(s->seqcount.sequence) == sizeof(long)) || sizeof(s->seqcount.sequence) == sizeof(long long))) __compiletime_assert_81(); } while (0); (*(const volatile typeof( _Generic((s->seqcount.sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->seqcount.sequence))) *)&(s->seqcount.sequence)); }); if (!0) return seq; if (true && __builtin_expect(!!(seq & 1), 0)) { ww_mutex_lock(s->lock, ((void *)0)); ww_mutex_unlock(s->lock); seq = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_82(void) ; if (!((sizeof(s->seqcount.sequence) == sizeof(char) || sizeof(s->seqcount.sequence) == sizeof(short) || sizeof(s->seqcount.sequence) == sizeof(int) || sizeof(s->seqcount.sequence) == sizeof(long)) || sizeof(s->seqcount.sequence) == sizeof(long long))) __compiletime_assert_82(); } while (0); (*(const volatile typeof( _Generic((s->seqcount.sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->seqcount.sequence))) *)&(s->seqcount.sequence)); }); } return seq; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool __seqprop_ww_mutex_preemptible(const seqcount_ww_mutex_t *s) { if (!0) return true; return false; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __seqprop_ww_mutex_assert(const seqcount_ww_mutex_t *s) { do { ({ int __ret_warn_on = !!(debug_locks && !(lock_is_held(&(&s->lock->base)->dep_map) != 0)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/seqlock.h"), "i" (280), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } while (0); }
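/*
 * The five one-line typedef blocks above are the expansion of seqlock.h's
 * SEQCOUNT_LOCKNAME() macro family: one seqcount_LOCKNAME_t wrapper per
 * associated lock type (raw_spinlock, spinlock, rwlock, mutex, ww_mutex),
 * each with its own __seqprop_*_{ptr,sequence,preemptible,assert} helpers.
 * The _Generic() dispatch in read_seqbegin()/read_seqretry() below selects
 * among them by the static type of the seqcount argument.
 */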
# 430 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
kcsan_atomic_next(0);
return __builtin_expect(!!(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_83(void) ; if (!((sizeof(s->sequence) == sizeof(char) || sizeof(s->sequence) == sizeof(short) || sizeof(s->sequence) == sizeof(int) || sizeof(s->sequence) == sizeof(long)) || sizeof(s->sequence) == sizeof(long long))) __compiletime_assert_83(); } while (0); (*(const volatile typeof( _Generic((s->sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->sequence))) *)&(s->sequence)); }) != start), 0);
}
# 450 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{
do { do { } while (0); __asm__ __volatile__ ("fence " "r" "," "r" : : : "memory"); } while (0);
return do___read_seqcount_retry(s, start);
}
# 470 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void do_raw_write_seqcount_begin(seqcount_t *s)
{
kcsan_nestable_atomic_begin();
s->sequence++;
do { do { } while (0); __asm__ __volatile__ ("fence " "w" "," "w" : : : "memory"); } while (0);
}
# 491 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void do_raw_write_seqcount_end(seqcount_t *s)
{
do { do { } while (0); __asm__ __volatile__ ("fence " "w" "," "w" : : : "memory"); } while (0);
s->sequence++;
kcsan_nestable_atomic_end();
}
# 517 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
do_raw_write_seqcount_begin(s);
lock_acquire(&s->dep_map, subclass, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
}
# 543 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void do_write_seqcount_begin(seqcount_t *s)
{
do_write_seqcount_begin_nested(s, 0);
}
# 563 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void do_write_seqcount_end(seqcount_t *s)
{
lock_release(&s->dep_map, (unsigned long)__builtin_return_address(0));
do_raw_write_seqcount_end(s);
}
# 613 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void do_raw_write_seqcount_barrier(seqcount_t *s)
{
kcsan_nestable_atomic_begin();
s->sequence++;
do { do { } while (0); __asm__ __volatile__ ("fence " "w" "," "w" : : : "memory"); } while (0);
s->sequence++;
kcsan_nestable_atomic_end();
}
# 633 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void do_write_seqcount_invalidate(seqcount_t *s)
{
do { do { } while (0); __asm__ __volatile__ ("fence " "w" "," "w" : : : "memory"); } while (0);
kcsan_nestable_atomic_begin();
s->sequence+=2;
kcsan_nestable_atomic_end();
}
# 651 "./include/linux/seqlock.h"
typedef struct {
seqcount_t seqcount;
} seqcount_latch_t;
# 680 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
{




return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_84(void) ; if (!((sizeof(s->seqcount.sequence) == sizeof(char) || sizeof(s->seqcount.sequence) == sizeof(short) || sizeof(s->seqcount.sequence) == sizeof(int) || sizeof(s->seqcount.sequence) == sizeof(long)) || sizeof(s->seqcount.sequence) == sizeof(long long))) __compiletime_assert_84(); } while (0); (*(const volatile typeof( _Generic((s->seqcount.sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->seqcount.sequence))) *)&(s->seqcount.sequence)); });
}
# 696 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
{
return do_read_seqcount_retry(_Generic(*(&s->seqcount), seqcount_t: __seqprop_ptr((void *)(&s->seqcount)), seqcount_raw_spinlock_t: __seqprop_raw_spinlock_ptr((void *)((&s->seqcount))), seqcount_spinlock_t: __seqprop_spinlock_ptr((void *)((&s->seqcount))), seqcount_rwlock_t: __seqprop_rwlock_ptr((void *)((&s->seqcount))), seqcount_mutex_t: __seqprop_mutex_ptr((void *)((&s->seqcount))), seqcount_ww_mutex_t: __seqprop_ww_mutex_ptr((void *)((&s->seqcount)))), start);
}
# 783 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void raw_write_seqcount_latch(seqcount_latch_t *s)
{
do { do { } while (0); __asm__ __volatile__ ("fence " "w" "," "w" : : : "memory"); } while (0);
s->seqcount.sequence++;
do { do { } while (0); __asm__ __volatile__ ("fence " "w" "," "w" : : : "memory"); } while (0);
}
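/*
 * Latch idiom: a writer calls raw_write_seqcount_latch() between updating
 * two copies of the protected data; readers use raw_read_seqcount_latch()
 * to pick copy (seq & 1) and read_seqcount_latch_retry() to detect a
 * concurrent switch. This gives wait-free, NMI-safe readers.
 */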
# 800 "./include/linux/seqlock.h"
typedef struct {




seqcount_spinlock_t seqcount;
spinlock_t lock;
} seqlock_t;
# 838 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned read_seqbegin(const seqlock_t *sl)
{
unsigned ret = ({ seqcount_lockdep_reader_access(_Generic(*(&sl->seqcount), seqcount_t: __seqprop_ptr((void *)(&sl->seqcount)), seqcount_raw_spinlock_t: __seqprop_raw_spinlock_ptr((void *)((&sl->seqcount))), seqcount_spinlock_t: __seqprop_spinlock_ptr((void *)((&sl->seqcount))), seqcount_rwlock_t: __seqprop_rwlock_ptr((void *)((&sl->seqcount))), seqcount_mutex_t: __seqprop_mutex_ptr((void *)((&sl->seqcount))), seqcount_ww_mutex_t: __seqprop_ww_mutex_ptr((void *)((&sl->seqcount))))); ({ unsigned _seq = ({ unsigned __seq; while ((__seq = _Generic(*(&sl->seqcount), seqcount_t: __seqprop_sequence((void *)(&sl->seqcount)), seqcount_raw_spinlock_t: __seqprop_raw_spinlock_sequence((void *)((&sl->seqcount))), seqcount_spinlock_t: __seqprop_spinlock_sequence((void *)((&sl->seqcount))), seqcount_rwlock_t: __seqprop_rwlock_sequence((void *)((&sl->seqcount))), seqcount_mutex_t: __seqprop_mutex_sequence((void *)((&sl->seqcount))), seqcount_ww_mutex_t: __seqprop_ww_mutex_sequence((void *)((&sl->seqcount))))) & 1) cpu_relax(); kcsan_atomic_next(1000); __seq; }); do { do { } while (0); __asm__ __volatile__ ("fence " "r" "," "r" : : : "memory"); } while (0); _seq; }); });

kcsan_atomic_next(0);
kcsan_flat_atomic_begin();
return ret;
}
# 858 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{




kcsan_flat_atomic_end();

return do_read_seqcount_retry(_Generic(*(&sl->seqcount), seqcount_t: __seqprop_ptr((void *)(&sl->seqcount)), seqcount_raw_spinlock_t: __seqprop_raw_spinlock_ptr((void *)((&sl->seqcount))), seqcount_spinlock_t: __seqprop_spinlock_ptr((void *)((&sl->seqcount))), seqcount_rwlock_t: __seqprop_rwlock_ptr((void *)((&sl->seqcount))), seqcount_mutex_t: __seqprop_mutex_ptr((void *)((&sl->seqcount))), seqcount_ww_mutex_t: __seqprop_ww_mutex_ptr((void *)((&sl->seqcount)))), start);
}
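/*
 * Canonical lockless reader built from the two helpers above (given some
 * seqlock_t sl protecting the data):
 *
 *   unsigned seq;
 *   do {
 *           seq = read_seqbegin(&sl);
 *           ... snapshot the protected fields ...
 *   } while (read_seqretry(&sl, seq));
 */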
# 888 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
}
# 901 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void write_sequnlock(seqlock_t *sl)
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock(&sl->lock);
}
# 914 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void write_seqlock_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
}
# 928 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void write_sequnlock_bh(seqlock_t *sl)
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_bh(&sl->lock);
}
# 941 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void write_seqlock_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
}
# 954 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void write_sequnlock_irq(seqlock_t *sl)
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irq(&sl->lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
unsigned long flags;

do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&sl->lock)); } while (0); } while (0);
do_write_seqcount_begin(&sl->seqcount.seqcount);
return flags;
}
# 991 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
}
# 1014 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void read_seqlock_excl(seqlock_t *sl)
{
spin_lock(&sl->lock);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void read_sequnlock_excl(seqlock_t *sl)
{
spin_unlock(&sl->lock);
}
# 1037 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void read_seqlock_excl_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void read_sequnlock_excl_bh(seqlock_t *sl)
{
spin_unlock_bh(&sl->lock);
}
# 1061 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void read_seqlock_excl_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void read_sequnlock_excl_irq(seqlock_t *sl)
{
spin_unlock_irq(&sl->lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
unsigned long flags;

do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&sl->lock)); } while (0); } while (0);
return flags;
}
# 1104 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
spin_unlock_irqrestore(&sl->lock, flags);
}
# 1141 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
if (!(*seq & 1))
*seq = read_seqbegin(lock);
else
read_seqlock_excl(lock);
}
# 1156 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int need_seqretry(seqlock_t *lock, int seq)
{
return !(seq & 1) && read_seqretry(lock, seq);
}
# 1169 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void done_seqretry(seqlock_t *lock, int seq)
{
if (seq & 1)
read_sequnlock_excl(lock);
}
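/*
 * Two-pass reader idiom for the three helpers above (the pattern used by,
 * e.g., d_path()): start even/lockless, fall back to taking the lock.
 *
 *   int seq = 0;                  // even: first pass is lockless
 * retry:
 *   read_seqbegin_or_lock(&sl, &seq);
 *   ... read ...
 *   if (need_seqretry(&sl, seq)) {
 *           seq = 1;              // odd: second pass takes the spinlock
 *           goto retry;
 *   }
 *   done_seqretry(&sl, seq);
 */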
# 1195 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
unsigned long flags = 0;

if (!(*seq & 1))
*seq = read_seqbegin(lock);
else
do { flags = __read_seqlock_excl_irqsave(lock); } while (0);

return flags;
}
# 1220 "./include/linux/seqlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
if (seq & 1)
read_sequnlock_excl_irqrestore(lock, flags);
}
# 20 "./include/linux/mm_types.h" 2

# 1 "./arch/riscv/include/asm/mmu.h" 1
# 12 "./arch/riscv/include/asm/mmu.h"
typedef struct {



atomic_long_t id;

void *vdso;


cpumask_t icache_stale_mask;

} mm_context_t;
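/*
 * riscv mm context, best-effort description: "id" carries the ASID (with
 * a generation in the high bits) when ASID allocation is active, "vdso"
 * is the per-mm vDSO mapping base, and icache_stale_mask marks CPUs whose
 * instruction cache must be flushed before this mm runs there again; the
 * details live in arch/riscv/mm/context.c.
 */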

void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa,
phys_addr_t sz, pgprot_t prot);
# 22 "./include/linux/mm_types.h" 2








struct address_space;
struct mem_cgroup;
# 72 "./include/linux/mm_types.h"
struct page {
unsigned long flags;







union {
struct {





union {
struct list_head lru;

struct {

void *__filler;

unsigned int mlock_count;
};
};

struct address_space *mapping;
unsigned long index;






unsigned long private;
};
struct {




unsigned long pp_magic;
struct page_pool *pp;
unsigned long _pp_mapping_pad;
unsigned long dma_addr;
union {




unsigned long dma_addr_upper;




atomic_long_t pp_frag_count;
};
};
struct {
unsigned long compound_head;


unsigned char compound_dtor;
unsigned char compound_order;
atomic_t compound_mapcount;
atomic_t compound_pincount;

unsigned int compound_nr;

};
struct {
unsigned long _compound_pad_1;
unsigned long _compound_pad_2;

struct list_head deferred_list;
};
struct {
unsigned long _pt_pad_1;
pgtable_t pmd_huge_pte;
unsigned long _pt_pad_2;
union {
struct mm_struct *pt_mm;
atomic_t pt_frag_refcount;
};

spinlock_t *ptl;



};
struct {

struct dev_pagemap *pgmap;
void *zone_device_data;
# 177 "./include/linux/mm_types.h"
};


struct callback_head callback_head;
};

union {




atomic_t _mapcount;







unsigned int page_type;
};


atomic_t _refcount;
# 224 "./include/linux/mm_types.h"
} ;
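/*
 * struct page is deliberately dense: after "flags", a five-word union
 * overlays the page-cache/LRU view, the page_pool view, the compound
 * (head/tail) view, the page-table view and the ZONE_DEVICE view; then
 * _mapcount and page_type share a word, followed by _refcount. Which arm
 * is live is implied by how the page is currently being used.
 */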
# 250 "./include/linux/mm_types.h"
struct folio {

union {
struct {

unsigned long flags;
union {
struct list_head lru;
struct {
void *__filler;
unsigned int mlock_count;
};
};
struct address_space *mapping;
unsigned long index;
void *private;
atomic_t _mapcount;
atomic_t _refcount;




};
struct page page;
};
};

_Static_assert(sizeof(struct page) == sizeof(struct folio), "sizeof(struct page) == sizeof(struct folio)");


_Static_assert(__builtin_offsetof(struct page, flags) == __builtin_offsetof(struct folio, flags), "offsetof(struct page, flags) == offsetof(struct folio, flags)");
_Static_assert(__builtin_offsetof(struct page, lru) == __builtin_offsetof(struct folio, lru), "offsetof(struct page, lru) == offsetof(struct folio, lru)");
_Static_assert(__builtin_offsetof(struct page, mapping) == __builtin_offsetof(struct folio, mapping), "offsetof(struct page, mapping) == offsetof(struct folio, mapping)");
_Static_assert(__builtin_offsetof(struct page, compound_head) == __builtin_offsetof(struct folio, lru), "offsetof(struct page, compound_head) == offsetof(struct folio, lru)");
_Static_assert(__builtin_offsetof(struct page, index) == __builtin_offsetof(struct folio, index), "offsetof(struct page, index) == offsetof(struct folio, index)");
_Static_assert(__builtin_offsetof(struct page, private) == __builtin_offsetof(struct folio, private), "offsetof(struct page, private) == offsetof(struct folio, private)");
_Static_assert(__builtin_offsetof(struct page, _mapcount) == __builtin_offsetof(struct folio, _mapcount), "offsetof(struct page, _mapcount) == offsetof(struct folio, _mapcount)");
_Static_assert(__builtin_offsetof(struct page, _refcount) == __builtin_offsetof(struct folio, _refcount), "offsetof(struct page, _refcount) == offsetof(struct folio, _refcount)");
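/*
 * These _Static_asserts pin struct folio to be an exact overlay of struct
 * page: a folio is a typed view of a head (non-tail) page, so code can
 * convert between the two representations without layout surprises.
 */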





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) atomic_t *folio_mapcount_ptr(struct folio *folio)
{
struct page *tail = &folio->page + 1;
return &tail->compound_mapcount;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) atomic_t *compound_mapcount_ptr(struct page *page)
{
return &page[1].compound_mapcount;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) atomic_t *compound_pincount_ptr(struct page *page)
{
return &page[1].compound_pincount;
}
# 325 "./include/linux/mm_types.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_page_private(struct page *page, unsigned long private)
{
page->private = private;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *folio_get_private(struct folio *folio)
{
return folio->private;
}

struct page_frag_cache {
void * va;

__u16 offset;
__u16 size;






unsigned int pagecnt_bias;
bool pfmemalloc;
};

typedef unsigned long vm_flags_t;






struct vm_region {
struct rb_node vm_rb;
vm_flags_t vm_flags;
unsigned long vm_start;
unsigned long vm_end;
unsigned long vm_top;
unsigned long vm_pgoff;
struct file *vm_file;

int vm_usage;
bool vm_icache_flushed : 1;

};
# 378 "./include/linux/mm_types.h"
struct vm_userfaultfd_ctx {};


struct anon_vma_name {
struct kref kref;

char name[];
};







struct vm_area_struct {


unsigned long vm_start;
unsigned long vm_end;



struct vm_area_struct *vm_next, *vm_prev;

struct rb_node vm_rb;







unsigned long rb_subtree_gap;



struct mm_struct *vm_mm;





pgprot_t vm_page_prot;
unsigned long vm_flags;
# 432 "./include/linux/mm_types.h"
union {
struct {
struct rb_node rb;
unsigned long rb_subtree_last;
} shared;




struct anon_vma_name *anon_name;
};







struct list_head anon_vma_chain;

struct anon_vma *anon_vma;


const struct vm_operations_struct *vm_ops;


unsigned long vm_pgoff;

struct file * vm_file;
void * vm_private_data;


atomic_long_t swap_readahead_info;







struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} ;

struct kioctx_table;
struct mm_struct {
struct {
struct vm_area_struct *mmap;
struct rb_root mm_rb;
u64 vmacache_seqnum;

unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);

unsigned long mmap_base;
unsigned long mmap_legacy_base;





unsigned long task_size;
unsigned long highest_vm_end;
pgd_t * pgd;
# 504 "./include/linux/mm_types.h"
atomic_t membarrier_state;
# 516 "./include/linux/mm_types.h"
atomic_t mm_users;
# 525 "./include/linux/mm_types.h"
atomic_t mm_count;


atomic_long_t pgtables_bytes;

int map_count;

spinlock_t page_table_lock;
# 547 "./include/linux/mm_types.h"
struct rw_semaphore mmap_lock;

struct list_head mmlist;






unsigned long hiwater_rss;
unsigned long hiwater_vm;

unsigned long total_vm;
unsigned long locked_vm;
atomic64_t pinned_vm;
unsigned long data_vm;
unsigned long exec_vm;
unsigned long stack_vm;
unsigned long def_flags;






seqcount_t write_protect_seq;

spinlock_t arg_lock;

unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;
unsigned long arg_start, arg_end, env_start, env_end;

unsigned long saved_auxv[(2*(7 + 20 + 1))];





struct mm_rss_stat rss_stat;

struct linux_binfmt *binfmt;


mm_context_t context;

unsigned long flags;


spinlock_t ioctx_lock;
struct kioctx_table *ioctx_table;
# 612 "./include/linux/mm_types.h"
struct user_namespace *user_ns;


struct file *exe_file;

struct mmu_notifier_subscriptions *notifier_subscriptions;
# 641 "./include/linux/mm_types.h"
atomic_t tlb_flush_pending;




struct uprobes_state uprobes_state;






struct work_struct async_put_work;




} ;





unsigned long cpu_bitmap[];
};
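/*
 * Two distinct reference counters live in the struct above: mm_users
 * counts users of the address space (threads, get_user_pages callers),
 * while mm_count pins struct mm_struct itself; the structure is freed
 * only once mm_count drops to zero. cpu_bitmap[] is a flexible array
 * member whose size is fixed at runtime from the number of possible
 * CPUs, which is why it must stay the last field.
 */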

extern struct mm_struct init_mm;


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mm_init_cpumask(struct mm_struct *mm)
{
unsigned long cpu_bitmap = (unsigned long)mm;

cpu_bitmap += __builtin_offsetof(struct mm_struct, cpu_bitmap);
cpumask_clear((struct cpumask *)cpu_bitmap);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) cpumask_t *mm_cpumask(struct mm_struct *mm)
{
return (struct cpumask *)&mm->cpu_bitmap;
}
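/*
 * mm_init_cpumask() above is a long-winded cpumask_clear(mm_cpumask(mm)):
 * because cpu_bitmap[] is a flexible array member, the address of the
 * embedded cpumask is computed by hand, e.g.
 *
 *   unsigned long addr = (unsigned long)mm
 *                      + __builtin_offsetof(struct mm_struct, cpu_bitmap);
 */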

struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_finish_mmu(struct mmu_gather *tlb);

struct vm_fault;






typedef unsigned int vm_fault_t;
# 723 "./include/linux/mm_types.h"
enum vm_fault_reason {
VM_FAULT_OOM = ( vm_fault_t)0x000001,
VM_FAULT_SIGBUS = ( vm_fault_t)0x000002,
VM_FAULT_MAJOR = ( vm_fault_t)0x000004,
VM_FAULT_WRITE = ( vm_fault_t)0x000008,
VM_FAULT_HWPOISON = ( vm_fault_t)0x000010,
VM_FAULT_HWPOISON_LARGE = ( vm_fault_t)0x000020,
VM_FAULT_SIGSEGV = ( vm_fault_t)0x000040,
VM_FAULT_NOPAGE = ( vm_fault_t)0x000100,
VM_FAULT_LOCKED = ( vm_fault_t)0x000200,
VM_FAULT_RETRY = ( vm_fault_t)0x000400,
VM_FAULT_FALLBACK = ( vm_fault_t)0x000800,
VM_FAULT_DONE_COW = ( vm_fault_t)0x001000,
VM_FAULT_NEEDDSYNC = ( vm_fault_t)0x002000,
VM_FAULT_HINDEX_MASK = ( vm_fault_t)0x0f0000,
};
# 763 "./include/linux/mm_types.h"
struct vm_special_mapping {
const char *name;







struct page **pages;





vm_fault_t (*fault)(const struct vm_special_mapping *sm,
struct vm_area_struct *vma,
struct vm_fault *vmf);

int (*mremap)(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma);
};

enum tlb_flush_reason {
TLB_FLUSH_ON_TASK_SWITCH,
TLB_REMOTE_SHOOTDOWN,
TLB_LOCAL_SHOOTDOWN,
TLB_LOCAL_MM_SHOOTDOWN,
TLB_REMOTE_SEND_IPI,
NR_TLB_FLUSH_REASONS,
};





typedef struct {
unsigned long val;
} swp_entry_t;
# 835 "./include/linux/mm_types.h"
enum fault_flag {
FAULT_FLAG_WRITE = 1 << 0,
FAULT_FLAG_MKWRITE = 1 << 1,
FAULT_FLAG_ALLOW_RETRY = 1 << 2,
FAULT_FLAG_RETRY_NOWAIT = 1 << 3,
FAULT_FLAG_KILLABLE = 1 << 4,
FAULT_FLAG_TRIED = 1 << 5,
FAULT_FLAG_USER = 1 << 6,
FAULT_FLAG_REMOTE = 1 << 7,
FAULT_FLAG_INSTRUCTION = 1 << 8,
FAULT_FLAG_INTERRUPTIBLE = 1 << 9,
};
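/*
 * The fault_flag values are bit flags OR-ed together per fault attempt;
 * a first-try write fault from user space would typically arrive as
 * FAULT_FLAG_WRITE | FAULT_FLAG_USER | FAULT_FLAG_ALLOW_RETRY |
 * FAULT_FLAG_KILLABLE (the exact combination depends on the arch fault
 * handler).
 */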
# 11 "./include/linux/uio.h" 2
# 1 "./include/uapi/linux/uio.h" 1
# 17 "./include/uapi/linux/uio.h"
struct iovec
{
void *iov_base;
__kernel_size_t iov_len;
};
# 12 "./include/linux/uio.h" 2

struct page;
struct pipe_inode_info;

struct kvec {
void *iov_base;
size_t iov_len;
};

enum iter_type {

ITER_IOVEC,
ITER_KVEC,
ITER_BVEC,
ITER_PIPE,
ITER_XARRAY,
ITER_DISCARD,
};

struct iov_iter_state {
size_t iov_offset;
size_t count;
unsigned long nr_segs;
};

struct iov_iter {
u8 iter_type;
bool nofault;
bool data_source;
size_t iov_offset;
size_t count;
union {
const struct iovec *iov;
const struct kvec *kvec;
const struct bio_vec *bvec;
struct xarray *xarray;
struct pipe_inode_info *pipe;
};
union {
unsigned long nr_segs;
struct {
unsigned int head;
unsigned int start_head;
};
loff_t xarray_start;
};
};
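/*
 * The two unions in struct iov_iter above overlay per-backend state,
 * and iter_type selects which member is valid: iov/kvec/bvec iterators
 * use the segment array together with nr_segs, ITER_PIPE uses pipe
 * with head/start_head, and ITER_XARRAY uses xarray with xarray_start.
 */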

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) enum iter_type iov_iter_type(const struct iov_iter *i)
{
return i->iter_type;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void iov_iter_save_state(struct iov_iter *iter,
struct iov_iter_state *state)
{
state->iov_offset = iter->iov_offset;
state->count = iter->count;
state->nr_segs = iter->nr_segs;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool iter_is_iovec(const struct iov_iter *i)
{
return iov_iter_type(i) == ITER_IOVEC;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool iov_iter_is_kvec(const struct iov_iter *i)
{
return iov_iter_type(i) == ITER_KVEC;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool iov_iter_is_bvec(const struct iov_iter *i)
{
return iov_iter_type(i) == ITER_BVEC;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool iov_iter_is_pipe(const struct iov_iter *i)
{
return iov_iter_type(i) == ITER_PIPE;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool iov_iter_is_discard(const struct iov_iter *i)
{
return iov_iter_type(i) == ITER_DISCARD;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool iov_iter_is_xarray(const struct iov_iter *i)
{
return iov_iter_type(i) == ITER_XARRAY;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned char iov_iter_rw(const struct iov_iter *i)
{
return i->data_source ? 1 : 0;
}
# 115 "./include/linux/uio.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
unsigned long seg;
size_t ret = 0;

for (seg = 0; seg < nr_segs; seg++)
ret += iov[seg].iov_len;
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
return (struct iovec) {
.iov_base = iter->iov->iov_base + iter->iov_offset,
.iov_len = ((iter->count) < (iter->iov->iov_len - iter->iov_offset) ?
(iter->count) : (iter->iov->iov_len - iter->iov_offset)), /* behavior-identical rewrite of the expanded min() macro */

};
}

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) size_t copy_folio_to_iter(struct folio *folio, size_t offset,
size_t bytes, struct iov_iter *i)
{
return copy_page_to_iter(&folio->page, offset, bytes, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__))
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
if (__builtin_expect(!!(!check_copy_size(addr, bytes, true)), 0))
return 0;
else
return _copy_to_iter(addr, bytes, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__))
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
if (__builtin_expect(!!(!check_copy_size(addr, bytes, false)), 0))
return 0;
else
return _copy_from_iter(addr, bytes, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__))
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
size_t copied = copy_from_iter(addr, bytes, i);
if (__builtin_expect(!!(copied == bytes), 1))
return true;
iov_iter_revert(i, copied);
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__))
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
if (__builtin_expect(!!(!check_copy_size(addr, bytes, false)), 0))
return 0;
else
return _copy_from_iter_nocache(addr, bytes, i);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__))
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
size_t copied = copy_from_iter_nocache(addr, bytes, i);
if (__builtin_expect(!!(copied == bytes), 1))
return true;
iov_iter_revert(i, copied);
return false;
}
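/*
 * The *_full variants above implement all-or-nothing semantics: if the
 * underlying copy comes up short (e.g. a faulting user page), the
 * iterator is rewound by the partial amount and false is returned, so
 * the caller never tracks partial progress, e.g.
 *
 *   if (!copy_from_iter_full(&hdr, sizeof(hdr), iter))
 *           return -EFAULT;
 */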
# 221 "./include/linux/uio.h"
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
loff_t start, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) size_t iov_iter_count(const struct iov_iter *i)
{
return i->count;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void iov_iter_truncate(struct iov_iter *i, u64 count)
{






if (i->count > count)
i->count = count;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
i->count = count;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
size_t shorted = 0;
int npages;

if (iov_iter_count(i) > max_bytes) {
shorted = iov_iter_count(i) - max_bytes;
iov_iter_truncate(i, max_bytes);
}
npages = iov_iter_npages(i, ((int)(~0U >> 1)));
if (shorted)
iov_iter_reexpand(i, iov_iter_count(i) + shorted);

return npages;
}
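/*
 * iov_iter_npages_cap() above shows the truncate/reexpand pattern: the
 * iterator is temporarily clamped to max_bytes so iov_iter_npages()
 * only counts pages inside the cap, then the original length is
 * restored with iov_iter_reexpand(). ((int)(~0U >> 1)) is simply
 * INT_MAX after macro expansion.
 */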

struct csum_state {
__wsum csum;
size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__))
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
__wsum *csum, struct iov_iter *i)
{
size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
if (__builtin_expect(!!(copied == bytes), 1))
return true;
iov_iter_revert(i, copied);
return false;
}
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
struct iov_iter *i);

struct iovec *iovec_from_user(const struct iovec *uvector,
unsigned long nr_segs, unsigned long fast_segs,
struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec *uvec,
unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec *uvec,
unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
struct iov_iter *i, bool compat);
int import_single_range(int type, void *buf, size_t len,
struct iovec *iov, struct iov_iter *i);
# 9 "./include/linux/socket.h" 2


# 1 "./include/uapi/linux/socket.h" 1
# 10 "./include/uapi/linux/socket.h"
typedef unsigned short __kernel_sa_family_t;





struct __kernel_sockaddr_storage {
union {
struct {
__kernel_sa_family_t ss_family;

char __data[128 - sizeof(unsigned short)];


};
void *__align;
};
};
# 12 "./include/linux/socket.h" 2

struct file;
struct pid;
struct cred;
struct socket;





struct seq_file;
extern void socket_seq_show(struct seq_file *seq);


typedef __kernel_sa_family_t sa_family_t;





struct sockaddr {
sa_family_t sa_family;
char sa_data[14];
};

struct linger {
int l_onoff;
int l_linger;
};
# 50 "./include/linux/socket.h"
struct msghdr {
void *msg_name;
int msg_namelen;
struct iov_iter msg_iter;






union {
void *msg_control;
void *msg_control_user;
};
bool msg_control_is_user : 1;
__kernel_size_t msg_controllen;
unsigned int msg_flags;
struct kiocb *msg_iocb;
};

struct user_msghdr {
void *msg_name;
int msg_namelen;
struct iovec *msg_iov;
__kernel_size_t msg_iovlen;
void *msg_control;
__kernel_size_t msg_controllen;
unsigned int msg_flags;
};


struct mmsghdr {
struct user_msghdr msg_hdr;
unsigned int msg_len;
};







struct cmsghdr {
__kernel_size_t cmsg_len;
int cmsg_level;
int cmsg_type;
};
# 141 "./include/linux/socket.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
struct cmsghdr *__cmsg)
{
struct cmsghdr * __ptr;

__ptr = (struct cmsghdr*)(((unsigned char *) __cmsg) + ( ((__cmsg->cmsg_len)+sizeof(long)-1) & ~(sizeof(long)-1) ));
if ((unsigned long)((char*)(__ptr+1) - (char *) __ctl) > __size)
return (struct cmsghdr *)0;

return __ptr;
}
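/*
 * The masking arithmetic in __cmsg_nxthdr() above is the expanded
 * CMSG_ALIGN(): round cmsg_len up to a multiple of sizeof(long), i.e.
 *
 *   aligned = (len + sizeof(long) - 1) & ~(sizeof(long) - 1);
 *
 * The bounds check then rejects a "next" header that would run past
 * the end of the control buffer.
 */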

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg)
{
return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) size_t msg_data_left(struct msghdr *msg)
{
return iov_iter_count(&msg->msg_iter);
}







struct ucred {
__u32 pid;
__u32 uid;
__u32 gid;
};
# 374 "./include/linux/socket.h"
extern int move_addr_to_kernel(void *uaddr, int ulen, struct __kernel_sockaddr_storage *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);

struct timespec64;
struct __kernel_timespec;
struct old_timespec32;

struct scm_timestamping_internal {
struct timespec64 ts[3];
};

extern void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss);
extern void put_cmsg_scm_timestamping(struct msghdr *msg, struct scm_timestamping_internal *tss);




extern long __sys_recvmsg(int fd, struct user_msghdr *msg,
unsigned int flags, bool forbid_cmsg_compat);
extern long __sys_sendmsg(int fd, struct user_msghdr *msg,
unsigned int flags, bool forbid_cmsg_compat);
extern int __sys_recvmmsg(int fd, struct mmsghdr *mmsg,
unsigned int vlen, unsigned int flags,
struct __kernel_timespec *timeout,
struct old_timespec32 *timeout32);
extern int __sys_sendmmsg(int fd, struct mmsghdr *mmsg,
unsigned int vlen, unsigned int flags,
bool forbid_cmsg_compat);
extern long __sys_sendmsg_sock(struct socket *sock, struct msghdr *msg,
unsigned int flags);
extern long __sys_recvmsg_sock(struct socket *sock, struct msghdr *msg,
struct user_msghdr *umsg,
struct sockaddr *uaddr,
unsigned int flags);
extern int sendmsg_copy_msghdr(struct msghdr *msg,
struct user_msghdr *umsg, unsigned flags,
struct iovec **iov);
extern int recvmsg_copy_msghdr(struct msghdr *msg,
struct user_msghdr *umsg, unsigned flags,
struct sockaddr **uaddr,
struct iovec **iov);
extern int __copy_msghdr_from_user(struct msghdr *kmsg,
struct user_msghdr *umsg,
struct sockaddr **save_addr,
struct iovec **uiov, size_t *nsegs);


extern int __sys_recvfrom(int fd, void *ubuf, size_t size,
unsigned int flags, struct sockaddr *addr,
int *addr_len);
extern int __sys_sendto(int fd, void *buff, size_t len,
unsigned int flags, struct sockaddr *addr,
int addr_len);
extern int __sys_accept4_file(struct file *file, unsigned file_flags,
struct sockaddr *upeer_sockaddr,
int *upeer_addrlen, int flags,
unsigned long nofile);
extern struct file *do_accept(struct file *file, unsigned file_flags,
struct sockaddr *upeer_sockaddr,
int *upeer_addrlen, int flags);
extern int __sys_accept4(int fd, struct sockaddr *upeer_sockaddr,
int *upeer_addrlen, int flags);
extern int __sys_socket(int family, int type, int protocol);
extern int __sys_bind(int fd, struct sockaddr *umyaddr, int addrlen);
extern int __sys_connect_file(struct file *file, struct __kernel_sockaddr_storage *addr,
int addrlen, int file_flags);
extern int __sys_connect(int fd, struct sockaddr *uservaddr,
int addrlen);
extern int __sys_listen(int fd, int backlog);
extern int __sys_getsockname(int fd, struct sockaddr *usockaddr,
int *usockaddr_len);
extern int __sys_getpeername(int fd, struct sockaddr *usockaddr,
int *usockaddr_len);
extern int __sys_socketpair(int family, int type, int protocol,
int *usockvec);
extern int __sys_shutdown_sock(struct socket *sock, int how);
extern int __sys_shutdown(int fd, int how);
# 31 "net/ipv6/route.c" 2

# 1 "./include/linux/net.h" 1
# 18 "./include/linux/net.h"
# 1 "./include/linux/random.h" 1








# 1 "./include/linux/once.h" 1





# 1 "./include/linux/jump_label.h" 1
# 79 "./include/linux/jump_label.h"
extern bool static_key_initialized;





struct static_key {
atomic_t enabled;
# 107 "./include/linux/jump_label.h"
};
# 191 "./include/linux/jump_label.h"
enum jump_label_type {
JUMP_LABEL_NOP = 0,
JUMP_LABEL_JMP,
};

struct module;
# 259 "./include/linux/jump_label.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int static_key_count(struct static_key *key)
{
return atomic_read(&key->enabled);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void jump_label_init(void)
{
static_key_initialized = true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool static_key_false(struct static_key *key)
{
if (__builtin_expect(!!(static_key_count(key) > 0), 0))
return true;
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool static_key_true(struct static_key *key)
{
if (__builtin_expect(!!(static_key_count(key) > 0), 1))
return true;
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void static_key_slow_inc(struct static_key *key)
{
({ int __ret_warn_on = !!(!static_key_initialized); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("%s(): static key '%pS' used before call to jump_label_init()", __func__, (key)); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/jump_label.h"), "i" (285), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
atomic_inc(&key->enabled);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void static_key_slow_dec(struct static_key *key)
{
({ int __ret_warn_on = !!(!static_key_initialized); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("%s(): static key '%pS' used before call to jump_label_init()", __func__, (key)); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/jump_label.h"), "i" (291), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
atomic_dec(&key->enabled);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int jump_label_text_reserved(void *start, void *end)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void jump_label_lock(void) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void jump_label_unlock(void) {}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int jump_label_apply_nops(struct module *mod)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void static_key_enable(struct static_key *key)
{
({ int __ret_warn_on = !!(!static_key_initialized); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("%s(): static key '%pS' used before call to jump_label_init()", __func__, (key)); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/jump_label.h"), "i" (313), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });

if (atomic_read(&key->enabled) != 0) {
({ int __ret_warn_on = !!(atomic_read(&key->enabled) != 1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/jump_label.h"), "i" (316), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
return;
}
atomic_set(&key->enabled, 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void static_key_disable(struct static_key *key)
{
({ int __ret_warn_on = !!(!static_key_initialized); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("%s(): static key '%pS' used before call to jump_label_init()", __func__, (key)); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/jump_label.h"), "i" (324), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });

if (atomic_read(&key->enabled) != 1) {
({ int __ret_warn_on = !!(atomic_read(&key->enabled) != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/jump_label.h"), "i" (327), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
return;
}
atomic_set(&key->enabled, 0);
}
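/*
 * The static_key_*() definitions above are the out-of-line fallback
 * used when jump labels (runtime code patching) are unavailable in
 * this configuration: a key degrades to an atomic counter, and
 * static_key_false()/static_key_true() become unlikely()/likely()
 * branches on that counter. The large inline-asm statements are the
 * expanded WARN()/WARN_ONCE() bug-table entries, which on riscv embed
 * an ebreak trap plus a __bug_table record.
 */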
# 353 "./include/linux/jump_label.h"
struct static_key_true {
struct static_key key;
};

struct static_key_false {
struct static_key key;
};
# 407 "./include/linux/jump_label.h"
extern bool ____wrong_branch_error(void);
# 7 "./include/linux/once.h" 2

bool __do_once_start(bool *done, unsigned long *flags);
void __do_once_done(bool *done, struct static_key_true *once_key,
unsigned long *flags, struct module *mod);
# 10 "./include/linux/random.h" 2

# 1 "./include/uapi/linux/random.h" 1
# 12 "./include/uapi/linux/random.h"
# 1 "./include/uapi/linux/ioctl.h" 1




# 1 "./arch/riscv/include/generated/uapi/asm/ioctl.h" 1
# 1 "./include/asm-generic/ioctl.h" 1




# 1 "./include/uapi/asm-generic/ioctl.h" 1
# 6 "./include/asm-generic/ioctl.h" 2





extern unsigned int __invalid_size_argument_for_IOC;
# 2 "./arch/riscv/include/generated/uapi/asm/ioctl.h" 2
# 6 "./include/uapi/linux/ioctl.h" 2
# 13 "./include/uapi/linux/random.h" 2
# 1 "./include/linux/irqnr.h" 1




# 1 "./include/uapi/linux/irqnr.h" 1
# 6 "./include/linux/irqnr.h" 2


extern int nr_irqs;
extern struct irq_desc *irq_to_desc(unsigned int irq);
unsigned int irq_get_next_irq(unsigned int offset);
# 14 "./include/uapi/linux/random.h" 2
# 41 "./include/uapi/linux/random.h"
struct rand_pool_info {
int entropy_count;
int buf_size;
__u32 buf[0];
};
# 12 "./include/linux/random.h" 2

struct notifier_block;

extern void add_device_randomness(const void *, size_t);
extern void add_bootloader_randomness(const void *, size_t);
# 25 "./include/linux/random.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void add_latent_entropy(void) {}


extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
extern void add_interrupt_randomness(int irq);
extern void add_hwgenerator_randomness(const void *buffer, size_t count,
size_t entropy);





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; }


extern void get_random_bytes(void *buf, size_t nbytes);
extern int wait_for_random_bytes(void);
extern int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) rand_initialize(void);
extern bool rng_is_initialized(void);
extern int register_random_ready_notifier(struct notifier_block *nb);
extern int unregister_random_ready_notifier(struct notifier_block *nb);
extern size_t __attribute__((__warn_unused_result__)) get_random_bytes_arch(void *buf, size_t nbytes);


extern const struct file_operations random_fops, urandom_fops;


u32 get_random_u32(void);
u64 get_random_u64(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int get_random_int(void)
{
return get_random_u32();
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long get_random_long(void)
{

return get_random_u64();



}
# 83 "./include/linux/random.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long get_random_canary(void)
{
unsigned long val = get_random_long();

return val & 0xffffffffffffff00UL;
}
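/*
 * get_random_canary() above zeroes the low byte of the stack canary
 * (the 0xffffffffffffff00UL mask is CANARY_MASK on 64-bit), so the
 * canary always contains a NUL terminator; that stops string functions
 * from reading or writing past it and leaking the remaining bytes.
 */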



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int get_random_bytes_wait(void *buf, size_t nbytes)
{
int ret = wait_for_random_bytes();
get_random_bytes(buf, nbytes);
return ret;
}
# 107 "./include/linux/random.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int get_random_u32_wait(u32 *out) { int ret = wait_for_random_bytes(); if (__builtin_expect(!!(ret), 0)) return ret; *out = get_random_u32(); return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int get_random_u64_wait(u64 *out) { int ret = wait_for_random_bytes(); if (__builtin_expect(!!(ret), 0)) return ret; *out = get_random_u64(); return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int get_random_int_wait(int *out) { int ret = wait_for_random_bytes(); if (__builtin_expect(!!(ret), 0)) return ret; *out = get_random_int(); return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int get_random_long_wait(long *out) { int ret = wait_for_random_bytes(); if (__builtin_expect(!!(ret), 0)) return ret; *out = get_random_long(); return 0; }


unsigned long randomize_page(unsigned long start, unsigned long range);







# 1 "./include/linux/prandom.h" 1
# 12 "./include/linux/prandom.h"
# 1 "./include/linux/percpu.h" 1




# 1 "./include/linux/mmdebug.h" 1







struct page;
struct vm_area_struct;
struct mm_struct;

void dump_page(struct page *page, const char *reason);
void dump_vma(const struct vm_area_struct *vma);
void dump_mm(const struct mm_struct *mm);
# 6 "./include/linux/percpu.h" 2






# 1 "./arch/riscv/include/generated/asm/percpu.h" 1
# 13 "./include/linux/percpu.h" 2
# 64 "./include/linux/percpu.h"
extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
int nr_units;
unsigned long base_offset;
unsigned int *cpu_map;

};

struct pcpu_alloc_info {
size_t static_size;
size_t reserved_size;
size_t dyn_size;
size_t unit_size;
size_t atom_size;
size_t alloc_size;
size_t __ai_size;
int nr_groups;
struct pcpu_group_info groups[];
};

enum pcpu_fc {
PCPU_FC_AUTO,
PCPU_FC_EMBED,
PCPU_FC_PAGE,

PCPU_FC_NR,
};
extern const char * const pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

typedef int (pcpu_fc_cpu_to_node_fn_t)(int cpu);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) pcpu_alloc_alloc_info(int nr_groups,
int nr_units);
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
void *base_addr);
# 120 "./include/linux/percpu.h"
extern void *__alloc_reserved_percpu(size_t size, size_t align) __attribute__((__alloc_size__(1))) __attribute__((__malloc__));
extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
extern bool is_kernel_percpu_address(unsigned long addr);


extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) setup_per_cpu_areas(void);


extern void *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __attribute__((__alloc_size__(1))) __attribute__((__malloc__));
extern void *__alloc_percpu(size_t size, size_t align) __attribute__((__alloc_size__(1))) __attribute__((__malloc__));
extern void free_percpu(void *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
# 140 "./include/linux/percpu.h"
extern unsigned long pcpu_nr_pages(void);
# 13 "./include/linux/prandom.h" 2

u32 prandom_u32(void);
void prandom_bytes(void *buf, size_t nbytes);
void prandom_seed(u32 seed);
void prandom_reseed_late(void);

extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned long) net_rand_noise;
# 59 "./include/linux/prandom.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void prandom_u32_add_noise(unsigned long a, unsigned long b,
unsigned long c, unsigned long d)
{




a ^= ({ typeof(net_rand_noise) pscr_ret__; do { const void *__vpp_verify = (typeof((&(net_rand_noise)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(net_rand_noise)) { case 1: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(net_rand_noise)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(net_rand_noise))) *)(&(net_rand_noise))); (typeof((typeof(*(&(net_rand_noise))) *)(&(net_rand_noise)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; case 2: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(net_rand_noise)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(net_rand_noise))) *)(&(net_rand_noise))); (typeof((typeof(*(&(net_rand_noise))) *)(&(net_rand_noise)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; case 4: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(net_rand_noise)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(net_rand_noise))) *)(&(net_rand_noise))); (typeof((typeof(*(&(net_rand_noise))) *)(&(net_rand_noise)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; case 8: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(net_rand_noise)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(net_rand_noise))) *)(&(net_rand_noise))); (typeof((typeof(*(&(net_rand_noise))) *)(&(net_rand_noise)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; });
( a += b, b = rol64(b, 13), c += d, d = rol64(d, 16), b ^= a, a = rol64(a, 32), d ^= c, a += d, d = rol64(d, 21), c += b, b = rol64(b, 17), d ^= a, b ^= c, c = rol64(c, 32) );
do { do { const void *__vpp_verify = (typeof((&(net_rand_noise)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(net_rand_noise)) { case 1: do { *({ do { const void *__vpp_verify = (typeof((&(net_rand_noise)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(net_rand_noise))) *)(&(net_rand_noise))); (typeof((typeof(*(&(net_rand_noise))) *)(&(net_rand_noise)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) = d; } while (0);break; case 2: do { *({ do { const void *__vpp_verify = (typeof((&(net_rand_noise)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(net_rand_noise))) *)(&(net_rand_noise))); (typeof((typeof(*(&(net_rand_noise))) *)(&(net_rand_noise)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) = d; } while (0);break; case 4: do { *({ do { const void *__vpp_verify = (typeof((&(net_rand_noise)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(net_rand_noise))) *)(&(net_rand_noise))); (typeof((typeof(*(&(net_rand_noise))) *)(&(net_rand_noise)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) = d; } while (0);break; case 8: do { *({ do { const void *__vpp_verify = (typeof((&(net_rand_noise)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(net_rand_noise))) *)(&(net_rand_noise))); (typeof((typeof(*(&(net_rand_noise))) *)(&(net_rand_noise)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) = d; } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}
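/*
 * prandom_u32_add_noise() above is dominated by two expanded per-cpu
 * accessors: the first giant statement is a raw read of the per-cpu
 * net_rand_noise variable (the switch on sizeof() collapses to the
 * 8-byte case), the middle line is one round of the SipHash-style
 * PRND_SIPROUND mixer, and the final statement writes d back. A
 * readable sketch of the same flow:
 *
 *   a ^= raw_cpu_read(net_rand_noise);
 *   PRND_SIPROUND(a, b, c, d);
 *   raw_cpu_write(net_rand_noise, d);
 */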

struct rnd_state {
__u32 s1, s2, s3, s4;
};

u32 prandom_u32_state(struct rnd_state *state);
void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
void prandom_seed_full_state(struct rnd_state *pcpu_state);
# 94 "./include/linux/prandom.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 prandom_u32_max(u32 ep_ro)
{
return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
}
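/*
 * prandom_u32_max() above maps a 32-bit random value onto [0, ep_ro)
 * with a multiply-and-shift instead of a modulo: (u64)rand * ep_ro is
 * below ep_ro * 2^32, so the high 32 bits fall in [0, ep_ro). There is
 * a slight bias whenever ep_ro does not divide 2^32, which is
 * acceptable for this non-cryptographic generator.
 */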




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 __seed(u32 x, u32 m)
{
return (x < m) ? x + m : x;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void prandom_seed_state(struct rnd_state *state, u64 seed)
{
u32 i = ((seed >> 32) ^ (seed << 10) ^ seed) & 0xffffffffUL;

state->s1 = __seed(i, 2U);
state->s2 = __seed(i, 8U);
state->s3 = __seed(i, 16U);
state->s4 = __seed(i, 128U);
prandom_u32_add_noise((unsigned long)(state), (unsigned long)(i), (unsigned long)(0), (unsigned long)(0));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 next_pseudo_random32(u32 seed)
{
return seed * 1664525 + 1013904223;
}
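/*
 * next_pseudo_random32() above is a plain linear congruential
 * generator, x' = x * 1664525 + 1013904223 (mod 2^32), using the
 * constant pair suggested in Numerical Recipes; it is only meant for
 * cheap, low-quality sequences.
 */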
# 121 "./include/linux/random.h" 2




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __attribute__((__warn_unused_result__)) arch_get_random_long(unsigned long *v)
{
return false;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __attribute__((__warn_unused_result__)) arch_get_random_int(unsigned int *v)
{
return false;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __attribute__((__warn_unused_result__)) arch_get_random_seed_long(unsigned long *v)
{
return false;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __attribute__((__warn_unused_result__)) arch_get_random_seed_int(unsigned int *v)
{
return false;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) arch_get_random_seed_long_early(unsigned long *v)
{
({ int __ret_warn_on = !!(system_state != SYSTEM_BOOTING); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/random.h"), "i" (150), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
return arch_get_random_seed_long(v);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) arch_get_random_long_early(unsigned long *v)
{
({ int __ret_warn_on = !!(system_state != SYSTEM_BOOTING); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/random.h"), "i" (158), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
return arch_get_random_long(v);
}



extern int random_prepare_cpu(unsigned int cpu);
extern int random_online_cpu(unsigned int cpu);
# 19 "./include/linux/net.h" 2

# 1 "./include/linux/fcntl.h" 1




# 1 "./include/linux/stat.h" 1





# 1 "./arch/riscv/include/generated/uapi/asm/stat.h" 1
# 1 "./include/uapi/asm-generic/stat.h" 1
# 24 "./include/uapi/asm-generic/stat.h"
struct stat {
unsigned long st_dev;
unsigned long st_ino;
unsigned int st_mode;
unsigned int st_nlink;
unsigned int st_uid;
unsigned int st_gid;
unsigned long st_rdev;
unsigned long __pad1;
long st_size;
int st_blksize;
int __pad2;
long st_blocks;
long st_atime;
unsigned long st_atime_nsec;
long st_mtime;
unsigned long st_mtime_nsec;
long st_ctime;
unsigned long st_ctime_nsec;
unsigned int __unused4;
unsigned int __unused5;
};
# 2 "./arch/riscv/include/generated/uapi/asm/stat.h" 2
# 7 "./include/linux/stat.h" 2
# 1 "./include/uapi/linux/stat.h" 1
# 56 "./include/uapi/linux/stat.h"
struct statx_timestamp {
__s64 tv_sec;
__u32 tv_nsec;
__s32 __reserved;
};
# 99 "./include/uapi/linux/stat.h"
struct statx {

__u32 stx_mask;
__u32 stx_blksize;
__u64 stx_attributes;

__u32 stx_nlink;
__u32 stx_uid;
__u32 stx_gid;
__u16 stx_mode;
__u16 __spare0[1];

__u64 stx_ino;
__u64 stx_size;
__u64 stx_blocks;
__u64 stx_attributes_mask;

struct statx_timestamp stx_atime;
struct statx_timestamp stx_btime;
struct statx_timestamp stx_ctime;
struct statx_timestamp stx_mtime;

__u32 stx_rdev_major;
__u32 stx_rdev_minor;
__u32 stx_dev_major;
__u32 stx_dev_minor;

__u64 stx_mnt_id;
__u64 __spare2;

__u64 __spare3[12];

};
# 8 "./include/linux/stat.h" 2
# 22 "./include/linux/stat.h"
struct kstat {
u32 result_mask;
umode_t mode;
unsigned int nlink;
uint32_t blksize;
u64 attributes;
u64 attributes_mask;
# 41 "./include/linux/stat.h"
u64 ino;
dev_t dev;
dev_t rdev;
kuid_t uid;
kgid_t gid;
loff_t size;
struct timespec64 atime;
struct timespec64 mtime;
struct timespec64 ctime;
struct timespec64 btime;
u64 blocks;
u64 mnt_id;
};
# 6 "./include/linux/fcntl.h" 2
# 1 "./include/uapi/linux/fcntl.h" 1




# 1 "./arch/riscv/include/generated/uapi/asm/fcntl.h" 1
# 1 "./include/uapi/asm-generic/fcntl.h" 1
# 156 "./include/uapi/asm-generic/fcntl.h"
struct f_owner_ex {
int type;
__kernel_pid_t pid;
};
# 200 "./include/uapi/asm-generic/fcntl.h"
struct flock {
short l_type;
short l_whence;
__kernel_off_t l_start;
__kernel_off_t l_len;
__kernel_pid_t l_pid;

};







struct flock64 {
short l_type;
short l_whence;
__kernel_loff_t l_start;
__kernel_loff_t l_len;
__kernel_pid_t l_pid;

};
# 2 "./arch/riscv/include/generated/uapi/asm/fcntl.h" 2
# 6 "./include/uapi/linux/fcntl.h" 2
# 1 "./include/uapi/linux/openat2.h" 1
# 19 "./include/uapi/linux/openat2.h"
struct open_how {
__u64 flags;
__u64 mode;
__u64 resolve;
};
# 7 "./include/uapi/linux/fcntl.h" 2
# 7 "./include/linux/fcntl.h" 2
# 21 "./include/linux/net.h" 2


# 1 "./include/linux/fs.h" 1





# 1 "./include/linux/wait_bit.h" 1
# 10 "./include/linux/wait_bit.h"
struct wait_bit_key {
void *flags;
int bit_nr;
unsigned long timeout;
};

struct wait_bit_queue_entry {
struct wait_bit_key key;
struct wait_queue_entry wq_entry;
};




typedef int wait_bit_action_f(struct wait_bit_key *key, int mode);

void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit);
int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
void wake_up_bit(void *word, int bit);
int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode);
int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout);
int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode);
struct wait_queue_head *bit_waitqueue(void *word, int bit);
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) wait_bit_init(void);

int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
# 49 "./include/linux/wait_bit.h"
extern int bit_wait(struct wait_bit_key *key, int mode);
extern int bit_wait_io(struct wait_bit_key *key, int mode);
extern int bit_wait_timeout(struct wait_bit_key *key, int mode);
extern int bit_wait_io_timeout(struct wait_bit_key *key, int mode);
# 70 "./include/linux/wait_bit.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
do { __might_sleep("include/linux/wait_bit.h", 73); __cond_resched(); } while (0);
if (!arch_test_bit(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit,
bit_wait,
mode);
}
# 95 "./include/linux/wait_bit.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
{
do { __might_sleep("include/linux/wait_bit.h", 98); __cond_resched(); } while (0);
if (!arch_test_bit(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit,
bit_wait_io,
mode);
}
# 121 "./include/linux/wait_bit.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
unsigned long timeout)
{
do { __might_sleep("include/linux/wait_bit.h", 125); __cond_resched(); } while (0);
if (!arch_test_bit(bit, word))
return 0;
return out_of_line_wait_on_bit_timeout(word, bit,
bit_wait_timeout,
mode, timeout);
}
# 149 "./include/linux/wait_bit.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
unsigned mode)
{
do { __might_sleep("include/linux/wait_bit.h", 153); __cond_resched(); } while (0);
if (!arch_test_bit(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit, action, mode);
}
# 178 "./include/linux/wait_bit.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
{
do { __might_sleep("include/linux/wait_bit.h", 181); __cond_resched(); } while (0);
if (!test_and_set_bit(bit, word))
return 0;
return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
}
# 202 "./include/linux/wait_bit.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
{
do { __might_sleep("include/linux/wait_bit.h", 205); __cond_resched(); } while (0);
if (!test_and_set_bit(bit, word))
return 0;
return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
}
# 228 "./include/linux/wait_bit.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
unsigned mode)
{
do { __might_sleep("include/linux/wait_bit.h", 232); __cond_resched(); } while (0);
if (!test_and_set_bit(bit, word))
return 0;
return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int flags);
extern void wake_up_var(void *var);
extern wait_queue_head_t *__var_waitqueue(void *p);
# 330 "./include/linux/wait_bit.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_and_wake_up_bit(int bit, void *word)
{
clear_bit_unlock(bit, word);

do { do { } while (0); __asm__ __volatile__ ("fence " "rw" "," "rw" : : : "memory"); } while (0);
wake_up_bit(word, bit);
}
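/*
 * In clear_and_wake_up_bit() above, the expanded barrier between
 * clear_bit_unlock() and wake_up_bit() is smp_mb__after_atomic(),
 * which on riscv is a full "fence rw,rw"; it orders the bit clear
 * against the waitqueue check inside wake_up_bit() so that a waiter
 * cannot miss the wakeup.
 */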
# 7 "./include/linux/fs.h" 2
# 1 "./include/linux/kdev_t.h" 1




# 1 "./include/uapi/linux/kdev_t.h" 1
# 6 "./include/linux/kdev_t.h" 2
# 24 "./include/linux/kdev_t.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool old_valid_dev(dev_t dev)
{
return ((unsigned int) ((dev) >> 20)) < 256 && ((unsigned int) ((dev) & ((1U << 20) - 1))) < 256;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) u16 old_encode_dev(dev_t dev)
{
return (((unsigned int) ((dev) >> 20)) << 8) | ((unsigned int) ((dev) & ((1U << 20) - 1)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) dev_t old_decode_dev(u16 val)
{
return ((((val >> 8) & 255) << 20) | (val & 255));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) u32 new_encode_dev(dev_t dev)
{
unsigned major = ((unsigned int) ((dev) >> 20));
unsigned minor = ((unsigned int) ((dev) & ((1U << 20) - 1)));
return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) dev_t new_decode_dev(u32 dev)
{
unsigned major = (dev & 0xfff00) >> 8;
unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
return (((major) << 20) | (minor));
}
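/*
 * The helpers above encode the kernel's dev_t split of 12 major bits
 * and 20 minor bits ((1U << 20) - 1 masks the minor). new_encode_dev()
 * packs that into the historical u32 layout: minor bits 0-7 stay low,
 * the 12-bit major occupies bits 8-19, and minor bits 8-19 move up to
 * bits 20-31, e.g.
 *
 *   dev_t d = (8u << 20) | 1;      // major 8, minor 1
 *   u32 enc = new_encode_dev(d);   // 0x00000801
 */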

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) u64 huge_encode_dev(dev_t dev)
{
return new_encode_dev(dev);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) dev_t huge_decode_dev(u64 dev)
{
return new_decode_dev(dev);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int sysv_valid_dev(dev_t dev)
{
return ((unsigned int) ((dev) >> 20)) < (1<<14) && ((unsigned int) ((dev) & ((1U << 20) - 1))) < (1<<18);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) u32 sysv_encode_dev(dev_t dev)
{
return ((unsigned int) ((dev) & ((1U << 20) - 1))) | (((unsigned int) ((dev) >> 20)) << 18);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned sysv_major(u32 dev)
{
return (dev >> 18) & 0x3fff;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned sysv_minor(u32 dev)
{
return dev & 0x3ffff;
}
# 8 "./include/linux/fs.h" 2
# 1 "./include/linux/dcache.h" 1







# 1 "./include/linux/rculist.h" 1
# 22 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void INIT_LIST_HEAD_RCU(struct list_head *list)
{
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_87(void) ; if (!((sizeof(list->next) == sizeof(char) || sizeof(list->next) == sizeof(short) || sizeof(list->next) == sizeof(int) || sizeof(list->next) == sizeof(long)) || sizeof(list->next) == sizeof(long long))) __compiletime_assert_87(); } while (0); do { *(volatile typeof(list->next) *)&(list->next) = (list); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_88(void) ; if (!((sizeof(list->prev) == sizeof(char) || sizeof(list->prev) == sizeof(short) || sizeof(list->prev) == sizeof(int) || sizeof(list->prev) == sizeof(long)) || sizeof(list->prev) == sizeof(long long))) __compiletime_assert_88(); } while (0); do { *(volatile typeof(list->prev) *)&(list->prev) = (list); } while (0); } while (0);
}
# 76 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __list_add_rcu(struct list_head *new,
struct list_head *prev, struct list_head *next)
{
if (!__list_add_valid(new, prev, next))
return;

new->next = next;
new->prev = prev;
do { uintptr_t _r_a_p__v = (uintptr_t)(new); ; if (__builtin_constant_p(new) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_89(void) ; if (!((sizeof(((*((struct list_head **)(&(prev)->next))))) == sizeof(char) || sizeof(((*((struct list_head **)(&(prev)->next))))) == sizeof(short) || sizeof(((*((struct list_head **)(&(prev)->next))))) == sizeof(int) || sizeof(((*((struct list_head **)(&(prev)->next))))) == sizeof(long)) || sizeof(((*((struct list_head **)(&(prev)->next))))) == sizeof(long long))) __compiletime_assert_89(); } while (0); do { *(volatile typeof(((*((struct list_head **)(&(prev)->next))))) *)&(((*((struct list_head **)(&(prev)->next))))) = ((typeof((*((struct list_head **)(&(prev)->next)))))(_r_a_p__v)); } while (0); } while (0); else do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_90(void) ; if (!((sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(char) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(short) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(int) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(long)))) __compiletime_assert_90(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_91(void) ; if (!((sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(char) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(short) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(int) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(long)) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(long long))) __compiletime_assert_91(); } while (0); do { *(volatile typeof(*&(*((struct list_head **)(&(prev)->next)))) *)&(*&(*((struct list_head **)(&(prev)->next)))) = ((typeof(*((typeof((*((struct list_head **)(&(prev)->next)))))_r_a_p__v)) *)((typeof((*((struct list_head **)(&(prev)->next)))))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); } while (0);
next->prev = new;
}
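/*
 * The long block above is rcu_assign_pointer(list_next_rcu(prev), new)
 * after expansion: a constant NULL degrades to a plain WRITE_ONCE(),
 * otherwise smp_store_release() is used, which on RISC-V emits
 * "fence rw,w" before the store so the new entry's fields are visible
 * before the entry itself is published to concurrent readers.
 */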
# 104 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_add_rcu(struct list_head *new, struct list_head *head)
{
__list_add_rcu(new, head, head->next);
}
# 125 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_add_tail_rcu(struct list_head *new,
struct list_head *head)
{
__list_add_rcu(new, head->prev, head);
}
# 155 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_del_rcu(struct list_head *entry)
{
__list_del_entry(entry);
entry->prev = ((void *) 0x122 + 0);
}
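/*
 * (void *) 0x122 + 0 is LIST_POISON2 (POISON_POINTER_DELTA is 0 in this
 * configuration). list_del_rcu() poisons only ->prev: RCU readers may
 * still be walking ->next of the removed entry, so it must stay valid.
 */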
# 181 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_del_init_rcu(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
__hlist_del(n);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_92(void) ; if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_92(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (((void *)0)); } while (0); } while (0);
}
}
# 197 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_replace_rcu(struct list_head *old,
struct list_head *new)
{
new->next = old->next;
new->prev = old->prev;
do { uintptr_t _r_a_p__v = (uintptr_t)(new); ; if (__builtin_constant_p(new) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_93(void) ; if (!((sizeof(((*((struct list_head **)(&(new->prev)->next))))) == sizeof(char) || sizeof(((*((struct list_head **)(&(new->prev)->next))))) == sizeof(short) || sizeof(((*((struct list_head **)(&(new->prev)->next))))) == sizeof(int) || sizeof(((*((struct list_head **)(&(new->prev)->next))))) == sizeof(long)) || sizeof(((*((struct list_head **)(&(new->prev)->next))))) == sizeof(long long))) __compiletime_assert_93(); } while (0); do { *(volatile typeof(((*((struct list_head **)(&(new->prev)->next))))) *)&(((*((struct list_head **)(&(new->prev)->next))))) = ((typeof((*((struct list_head **)(&(new->prev)->next)))))(_r_a_p__v)); } while (0); } while (0); else do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_94(void) ; if (!((sizeof(*&(*((struct list_head **)(&(new->prev)->next)))) == sizeof(char) || sizeof(*&(*((struct list_head **)(&(new->prev)->next)))) == sizeof(short) || sizeof(*&(*((struct list_head **)(&(new->prev)->next)))) == sizeof(int) || sizeof(*&(*((struct list_head **)(&(new->prev)->next)))) == sizeof(long)))) __compiletime_assert_94(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_95(void) ; if (!((sizeof(*&(*((struct list_head **)(&(new->prev)->next)))) == sizeof(char) || sizeof(*&(*((struct list_head **)(&(new->prev)->next)))) == sizeof(short) || sizeof(*&(*((struct list_head **)(&(new->prev)->next)))) == sizeof(int) || sizeof(*&(*((struct list_head **)(&(new->prev)->next)))) == sizeof(long)) || sizeof(*&(*((struct list_head **)(&(new->prev)->next)))) == sizeof(long long))) __compiletime_assert_95(); } while (0); do { *(volatile typeof(*&(*((struct list_head **)(&(new->prev)->next)))) *)&(*&(*((struct list_head **)(&(new->prev)->next)))) = ((typeof(*((typeof((*((struct list_head **)(&(new->prev)->next)))))_r_a_p__v)) *)((typeof((*((struct list_head **)(&(new->prev)->next)))))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); } while (0);
new->next->prev = new;
old->prev = ((void *) 0x122 + 0);
}
# 226 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __list_splice_init_rcu(struct list_head *list,
struct list_head *prev,
struct list_head *next,
void (*sync)(void))
{
struct list_head *first = list->next;
struct list_head *last = list->prev;







INIT_LIST_HEAD_RCU(list);
# 249 "./include/linux/rculist.h"
sync();
__kcsan_check_access(&(*first), sizeof(*first), (1 << 0) | (1 << 3));
__kcsan_check_access(&(*last), sizeof(*last), (1 << 0) | (1 << 3));
# 261 "./include/linux/rculist.h"
last->next = next;
do { uintptr_t _r_a_p__v = (uintptr_t)(first); ; if (__builtin_constant_p(first) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_96(void) ; if (!((sizeof(((*((struct list_head **)(&(prev)->next))))) == sizeof(char) || sizeof(((*((struct list_head **)(&(prev)->next))))) == sizeof(short) || sizeof(((*((struct list_head **)(&(prev)->next))))) == sizeof(int) || sizeof(((*((struct list_head **)(&(prev)->next))))) == sizeof(long)) || sizeof(((*((struct list_head **)(&(prev)->next))))) == sizeof(long long))) __compiletime_assert_96(); } while (0); do { *(volatile typeof(((*((struct list_head **)(&(prev)->next))))) *)&(((*((struct list_head **)(&(prev)->next))))) = ((typeof((*((struct list_head **)(&(prev)->next)))))(_r_a_p__v)); } while (0); } while (0); else do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_97(void) ; if (!((sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(char) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(short) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(int) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(long)))) __compiletime_assert_97(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_98(void) ; if (!((sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(char) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(short) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(int) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(long)) || sizeof(*&(*((struct list_head **)(&(prev)->next)))) == sizeof(long long))) __compiletime_assert_98(); } while (0); do { *(volatile typeof(*&(*((struct list_head **)(&(prev)->next)))) *)&(*&(*((struct list_head **)(&(prev)->next)))) = ((typeof(*((typeof((*((struct list_head **)(&(prev)->next)))))_r_a_p__v)) *)((typeof((*((struct list_head **)(&(prev)->next)))))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); } while (0);
first->prev = prev;
next->prev = last;
}
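/*
 * sync() is the caller-supplied grace-period wait, typically
 * synchronize_rcu(). The two __kcsan_check_access() calls with flags
 * (1 << 0) | (1 << 3) appear to be the expansion of
 * ASSERT_EXCLUSIVE_ACCESS(*first) and ASSERT_EXCLUSIVE_ACCESS(*last),
 * i.e. KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT.
 */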
# 274 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_splice_init_rcu(struct list_head *list,
struct list_head *head,
void (*sync)(void))
{
if (!list_empty(list))
__list_splice_init_rcu(list, head, head->next, sync);
}
# 289 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void list_splice_tail_init_rcu(struct list_head *list,
struct list_head *head,
void (*sync)(void))
{
if (!list_empty(list))
__list_splice_init_rcu(list, head->prev, head, sync);
}
# 511 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_del_rcu(struct hlist_node *n)
{
__hlist_del(n);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_99(void) ; if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_99(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (((void *) 0x122 + 0)); } while (0); } while (0);
}
# 524 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_replace_rcu(struct hlist_node *old,
struct hlist_node *new)
{
struct hlist_node *next = old->next;

new->next = next;
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_100(void) ; if (!((sizeof(new->pprev) == sizeof(char) || sizeof(new->pprev) == sizeof(short) || sizeof(new->pprev) == sizeof(int) || sizeof(new->pprev) == sizeof(long)) || sizeof(new->pprev) == sizeof(long long))) __compiletime_assert_100(); } while (0); do { *(volatile typeof(new->pprev) *)&(new->pprev) = (old->pprev); } while (0); } while (0);
do { uintptr_t _r_a_p__v = (uintptr_t)(new); ; if (__builtin_constant_p(new) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_101(void) ; if (!((sizeof((*(struct hlist_node **)new->pprev)) == sizeof(char) || sizeof((*(struct hlist_node **)new->pprev)) == sizeof(short) || sizeof((*(struct hlist_node **)new->pprev)) == sizeof(int) || sizeof((*(struct hlist_node **)new->pprev)) == sizeof(long)) || sizeof((*(struct hlist_node **)new->pprev)) == sizeof(long long))) __compiletime_assert_101(); } while (0); do { *(volatile typeof((*(struct hlist_node **)new->pprev)) *)&((*(struct hlist_node **)new->pprev)) = ((typeof(*(struct hlist_node **)new->pprev))(_r_a_p__v)); } while (0); } while (0); else do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_102(void) ; if (!((sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(char) || sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(short) || sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(int) || sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(long)))) __compiletime_assert_102(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_103(void) ; if (!((sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(char) || sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(short) || sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(int) || sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(long)) || sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(long long))) __compiletime_assert_103(); } while (0); do { *(volatile typeof(*&*(struct hlist_node **)new->pprev) *)&(*&*(struct hlist_node **)new->pprev) = ((typeof(*((typeof(*(struct hlist_node **)new->pprev))_r_a_p__v)) *)((typeof(*(struct hlist_node **)new->pprev))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); } while (0);
if (next)
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_104(void) ; if (!((sizeof(new->next->pprev) == sizeof(char) || sizeof(new->next->pprev) == sizeof(short) || sizeof(new->next->pprev) == sizeof(int) || sizeof(new->next->pprev) == sizeof(long)) || sizeof(new->next->pprev) == sizeof(long long))) __compiletime_assert_104(); } while (0); do { *(volatile typeof(new->next->pprev) *)&(new->next->pprev) = (&new->next); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_105(void) ; if (!((sizeof(old->pprev) == sizeof(char) || sizeof(old->pprev) == sizeof(short) || sizeof(old->pprev) == sizeof(int) || sizeof(old->pprev) == sizeof(long)) || sizeof(old->pprev) == sizeof(long long))) __compiletime_assert_105(); } while (0); do { *(volatile typeof(old->pprev) *)&(old->pprev) = (((void *) 0x122 + 0)); } while (0); } while (0);
}
# 547 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlists_swap_heads_rcu(struct hlist_head *left, struct hlist_head *right)
{
struct hlist_node *node1 = left->first;
struct hlist_node *node2 = right->first;

do { uintptr_t _r_a_p__v = (uintptr_t)(node2); ; if (__builtin_constant_p(node2) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_106(void) ; if (!((sizeof((left->first)) == sizeof(char) || sizeof((left->first)) == sizeof(short) || sizeof((left->first)) == sizeof(int) || sizeof((left->first)) == sizeof(long)) || sizeof((left->first)) == sizeof(long long))) __compiletime_assert_106(); } while (0); do { *(volatile typeof((left->first)) *)&((left->first)) = ((typeof(left->first))(_r_a_p__v)); } while (0); } while (0); else do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_107(void) ; if (!((sizeof(*&left->first) == sizeof(char) || sizeof(*&left->first) == sizeof(short) || sizeof(*&left->first) == sizeof(int) || sizeof(*&left->first) == sizeof(long)))) __compiletime_assert_107(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_108(void) ; if (!((sizeof(*&left->first) == sizeof(char) || sizeof(*&left->first) == sizeof(short) || sizeof(*&left->first) == sizeof(int) || sizeof(*&left->first) == sizeof(long)) || sizeof(*&left->first) == sizeof(long long))) __compiletime_assert_108(); } while (0); do { *(volatile typeof(*&left->first) *)&(*&left->first) = ((typeof(*((typeof(left->first))_r_a_p__v)) *)((typeof(left->first))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); } while (0);
do { uintptr_t _r_a_p__v = (uintptr_t)(node1); ; if (__builtin_constant_p(node1) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_109(void) ; if (!((sizeof((right->first)) == sizeof(char) || sizeof((right->first)) == sizeof(short) || sizeof((right->first)) == sizeof(int) || sizeof((right->first)) == sizeof(long)) || sizeof((right->first)) == sizeof(long long))) __compiletime_assert_109(); } while (0); do { *(volatile typeof((right->first)) *)&((right->first)) = ((typeof(right->first))(_r_a_p__v)); } while (0); } while (0); else do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_110(void) ; if (!((sizeof(*&right->first) == sizeof(char) || sizeof(*&right->first) == sizeof(short) || sizeof(*&right->first) == sizeof(int) || sizeof(*&right->first) == sizeof(long)))) __compiletime_assert_110(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_111(void) ; if (!((sizeof(*&right->first) == sizeof(char) || sizeof(*&right->first) == sizeof(short) || sizeof(*&right->first) == sizeof(int) || sizeof(*&right->first) == sizeof(long)) || sizeof(*&right->first) == sizeof(long long))) __compiletime_assert_111(); } while (0); do { *(volatile typeof(*&right->first) *)&(*&right->first) = ((typeof(*((typeof(right->first))_r_a_p__v)) *)((typeof(right->first))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_112(void) ; if (!((sizeof(node2->pprev) == sizeof(char) || sizeof(node2->pprev) == sizeof(short) || sizeof(node2->pprev) == sizeof(int) || sizeof(node2->pprev) == sizeof(long)) || sizeof(node2->pprev) == sizeof(long long))) __compiletime_assert_112(); } while (0); do { *(volatile typeof(node2->pprev) *)&(node2->pprev) = (&left->first); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_113(void) ; if (!((sizeof(node1->pprev) == sizeof(char) || sizeof(node1->pprev) == sizeof(short) || sizeof(node1->pprev) == sizeof(int) || sizeof(node1->pprev) == sizeof(long)) || sizeof(node1->pprev) == sizeof(long long))) __compiletime_assert_113(); } while (0); do { *(volatile typeof(node1->pprev) *)&(node1->pprev) = (&right->first); } while (0); } while (0);
}
# 584 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_add_head_rcu(struct hlist_node *n,
struct hlist_head *h)
{
struct hlist_node *first = h->first;

n->next = first;
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_114(void) ; if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_114(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (&h->first); } while (0); } while (0);
do { uintptr_t _r_a_p__v = (uintptr_t)(n); ; if (__builtin_constant_p(n) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_115(void) ; if (!((sizeof(((*((struct hlist_node **)(&(h)->first))))) == sizeof(char) || sizeof(((*((struct hlist_node **)(&(h)->first))))) == sizeof(short) || sizeof(((*((struct hlist_node **)(&(h)->first))))) == sizeof(int) || sizeof(((*((struct hlist_node **)(&(h)->first))))) == sizeof(long)) || sizeof(((*((struct hlist_node **)(&(h)->first))))) == sizeof(long long))) __compiletime_assert_115(); } while (0); do { *(volatile typeof(((*((struct hlist_node **)(&(h)->first))))) *)&(((*((struct hlist_node **)(&(h)->first))))) = ((typeof((*((struct hlist_node **)(&(h)->first)))))(_r_a_p__v)); } while (0); } while (0); else do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_116(void) ; if (!((sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(long)))) __compiletime_assert_116(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_117(void) ; if (!((sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(long)) || sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(long long))) __compiletime_assert_117(); } while (0); do { *(volatile typeof(*&(*((struct hlist_node **)(&(h)->first)))) *)&(*&(*((struct hlist_node **)(&(h)->first)))) = ((typeof(*((typeof((*((struct hlist_node **)(&(h)->first)))))_r_a_p__v)) *)((typeof((*((struct hlist_node **)(&(h)->first)))))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); } while (0);
if (first)
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_118(void) ; if (!((sizeof(first->pprev) == sizeof(char) || sizeof(first->pprev) == sizeof(short) || sizeof(first->pprev) == sizeof(int) || sizeof(first->pprev) == sizeof(long)) || sizeof(first->pprev) == sizeof(long long))) __compiletime_assert_118(); } while (0); do { *(volatile typeof(first->pprev) *)&(first->pprev) = (&n->next); } while (0); } while (0);
}
# 615 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_add_tail_rcu(struct hlist_node *n,
struct hlist_head *h)
{
struct hlist_node *i, *last = ((void *)0);


for (i = h->first; i; i = i->next)
last = i;

if (last) {
n->next = last->next;
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_119(void) ; if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_119(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (&last->next); } while (0); } while (0);
do { uintptr_t _r_a_p__v = (uintptr_t)(n); ; if (__builtin_constant_p(n) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_120(void) ; if (!((sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(char) || sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(short) || sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(int) || sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(long)) || sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(long long))) __compiletime_assert_120(); } while (0); do { *(volatile typeof(((*((struct hlist_node **)(&(last)->next))))) *)&(((*((struct hlist_node **)(&(last)->next))))) = ((typeof((*((struct hlist_node **)(&(last)->next)))))(_r_a_p__v)); } while (0); } while (0); else do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_121(void) ; if (!((sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(long)))) __compiletime_assert_121(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_122(void) ; if (!((sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(long)) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(long long))) __compiletime_assert_122(); } while (0); do { *(volatile typeof(*&(*((struct hlist_node **)(&(last)->next)))) *)&(*&(*((struct hlist_node **)(&(last)->next)))) = ((typeof(*((typeof((*((struct hlist_node **)(&(last)->next)))))_r_a_p__v)) *)((typeof((*((struct hlist_node **)(&(last)->next)))))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); } while (0);
} else {
hlist_add_head_rcu(n, h);
}
}
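/*
 * hlist has no tail pointer, so hlist_add_tail_rcu() must walk the whole
 * chain to find the last node; publishing n then goes through the same
 * rcu_assign_pointer() expansion seen above.
 */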
# 651 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_add_before_rcu(struct hlist_node *n,
struct hlist_node *next)
{
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_123(void) ; if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_123(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (next->pprev); } while (0); } while (0);
n->next = next;
do { uintptr_t _r_a_p__v = (uintptr_t)(n); ; if (__builtin_constant_p(n) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_124(void) ; if (!((sizeof(((*((struct hlist_node **)((n)->pprev))))) == sizeof(char) || sizeof(((*((struct hlist_node **)((n)->pprev))))) == sizeof(short) || sizeof(((*((struct hlist_node **)((n)->pprev))))) == sizeof(int) || sizeof(((*((struct hlist_node **)((n)->pprev))))) == sizeof(long)) || sizeof(((*((struct hlist_node **)((n)->pprev))))) == sizeof(long long))) __compiletime_assert_124(); } while (0); do { *(volatile typeof(((*((struct hlist_node **)((n)->pprev))))) *)&(((*((struct hlist_node **)((n)->pprev))))) = ((typeof((*((struct hlist_node **)((n)->pprev)))))(_r_a_p__v)); } while (0); } while (0); else do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_125(void) ; if (!((sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(long)))) __compiletime_assert_125(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_126(void) ; if (!((sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(long)) || sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(long long))) __compiletime_assert_126(); } while (0); do { *(volatile typeof(*&(*((struct hlist_node **)((n)->pprev)))) *)&(*&(*((struct hlist_node **)((n)->pprev)))) = ((typeof(*((typeof((*((struct hlist_node **)((n)->pprev)))))_r_a_p__v)) *)((typeof((*((struct hlist_node **)((n)->pprev)))))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_127(void) ; if (!((sizeof(next->pprev) == sizeof(char) || sizeof(next->pprev) == sizeof(short) || sizeof(next->pprev) == sizeof(int) || sizeof(next->pprev) == sizeof(long)) || sizeof(next->pprev) == sizeof(long long))) __compiletime_assert_127(); } while (0); do { *(volatile typeof(next->pprev) *)&(next->pprev) = (&n->next); } while (0); } while (0);
}
# 678 "./include/linux/rculist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_add_behind_rcu(struct hlist_node *n,
struct hlist_node *prev)
{
n->next = prev->next;
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_128(void) ; if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_128(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (&prev->next); } while (0); } while (0);
do { uintptr_t _r_a_p__v = (uintptr_t)(n); ; if (__builtin_constant_p(n) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_129(void) ; if (!((sizeof(((*((struct hlist_node **)(&(prev)->next))))) == sizeof(char) || sizeof(((*((struct hlist_node **)(&(prev)->next))))) == sizeof(short) || sizeof(((*((struct hlist_node **)(&(prev)->next))))) == sizeof(int) || sizeof(((*((struct hlist_node **)(&(prev)->next))))) == sizeof(long)) || sizeof(((*((struct hlist_node **)(&(prev)->next))))) == sizeof(long long))) __compiletime_assert_129(); } while (0); do { *(volatile typeof(((*((struct hlist_node **)(&(prev)->next))))) *)&(((*((struct hlist_node **)(&(prev)->next))))) = ((typeof((*((struct hlist_node **)(&(prev)->next)))))(_r_a_p__v)); } while (0); } while (0); else do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_130(void) ; if (!((sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(long)))) __compiletime_assert_130(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_131(void) ; if (!((sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(long)) || sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(long long))) __compiletime_assert_131(); } while (0); do { *(volatile typeof(*&(*((struct hlist_node **)(&(prev)->next)))) *)&(*&(*((struct hlist_node **)(&(prev)->next)))) = ((typeof(*((typeof((*((struct hlist_node **)(&(prev)->next)))))_r_a_p__v)) *)((typeof((*((struct hlist_node **)(&(prev)->next)))))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); } while (0);
if (n->next)
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_132(void) ; if (!((sizeof(n->next->pprev) == sizeof(char) || sizeof(n->next->pprev) == sizeof(short) || sizeof(n->next->pprev) == sizeof(int) || sizeof(n->next->pprev) == sizeof(long)) || sizeof(n->next->pprev) == sizeof(long long))) __compiletime_assert_132(); } while (0); do { *(volatile typeof(n->next->pprev) *)&(n->next->pprev) = (&n->next); } while (0); } while (0);
}
# 9 "./include/linux/dcache.h" 2
# 1 "./include/linux/rculist_bl.h" 1







# 1 "./include/linux/list_bl.h" 1





# 1 "./include/linux/bit_spinlock.h" 1
# 16 "./include/linux/bit_spinlock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bit_spin_lock(int bitnum, unsigned long *addr)
{







do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);

while (__builtin_expect(!!(test_and_set_bit_lock(bitnum, addr)), 0)) {
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
do {
cpu_relax();
} while (arch_test_bit(bitnum, addr));
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
}

(void)0;
}
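/*
 * The __preempt_count_add()/__preempt_count_sub() pairs wrapped in empty
 * asm memory clobbers are preempt_disable()/preempt_enable() after
 * expansion. Roughly:
 *
 *	preempt_disable();
 *	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
 *		preempt_enable();
 *		do cpu_relax(); while (test_bit(bitnum, addr));
 *		preempt_disable();
 *	}
 *
 * i.e. the lock spins with preemption enabled and only retries the
 * atomic operation with preemption off.
 */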




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int bit_spin_trylock(int bitnum, unsigned long *addr)
{
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);

if (__builtin_expect(!!(test_and_set_bit_lock(bitnum, addr)), 0)) {
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
return 0;
}

(void)0;
return 1;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bit_spin_unlock(int bitnum, unsigned long *addr)
{

do { if (__builtin_expect(!!(!arch_test_bit(bitnum, addr)), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/bit_spinlock.h"), "i" (60), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);


clear_bit_unlock(bitnum, addr);

do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
(void)0;
}
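/*
 * The "ebreak" plus __bug_table asm blob is the RISC-V expansion of
 * BUG_ON(!test_bit(bitnum, addr)): unlocking a bit that is not set traps
 * and reports the file/line recorded in the bug table entry.
 */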






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __bit_spin_unlock(int bitnum, unsigned long *addr)
{

do { if (__builtin_expect(!!(!arch_test_bit(bitnum, addr)), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/bit_spinlock.h"), "i" (77), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);


__clear_bit_unlock(bitnum, addr);

do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
(void)0;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int bit_spin_is_locked(int bitnum, unsigned long *addr)
{

return arch_test_bit(bitnum, addr);





}
# 7 "./include/linux/list_bl.h" 2
# 34 "./include/linux/list_bl.h"
struct hlist_bl_head {
struct hlist_bl_node *first;
};

struct hlist_bl_node {
struct hlist_bl_node *next, **pprev;
};



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
{
h->next = ((void *)0);
h->pprev = ((void *)0);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool hlist_bl_unhashed(const struct hlist_bl_node *h)
{
return !h->pprev;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h)
{
return (struct hlist_bl_node *)
((unsigned long)h->first & ~1UL);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_bl_set_first(struct hlist_bl_head *h,
struct hlist_bl_node *n)
{
do { if (__builtin_expect(!!((unsigned long)n & 1UL), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/list_bl.h"), "i" (66), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
do { if (__builtin_expect(!!(((unsigned long)h->first & 1UL) != 1UL), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/list_bl.h"), "i" (68), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);

h->first = (struct hlist_bl_node *)((unsigned long)n | 1UL);
}
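/*
 * hlist_bl ("bit-locked") lists keep the lock in bit 0 of ->first, which
 * is why hlist_bl_first() masks with ~1UL and why the BUG_ON expansions
 * above require n to be at least 2-byte aligned and the head to be
 * locked before ->first is rewritten.
 */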

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool hlist_bl_empty(const struct hlist_bl_head *h)
{
return !((unsigned long)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_133(void) ; if (!((sizeof(h->first) == sizeof(char) || sizeof(h->first) == sizeof(short) || sizeof(h->first) == sizeof(int) || sizeof(h->first) == sizeof(long)) || sizeof(h->first) == sizeof(long long))) __compiletime_assert_133(); } while (0); (*(const volatile typeof( _Generic((h->first), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (h->first))) *)&(h->first)); }) & ~1UL);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_bl_add_head(struct hlist_bl_node *n,
struct hlist_bl_head *h)
{
struct hlist_bl_node *first = hlist_bl_first(h);

n->next = first;
if (first)
first->pprev = &n->next;
n->pprev = &h->first;
hlist_bl_set_first(h, n);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_bl_add_before(struct hlist_bl_node *n,
struct hlist_bl_node *next)
{
struct hlist_bl_node **pprev = next->pprev;

n->pprev = pprev;
n->next = next;
next->pprev = &n->next;


do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_134(void) ; if (!((sizeof(*pprev) == sizeof(char) || sizeof(*pprev) == sizeof(short) || sizeof(*pprev) == sizeof(int) || sizeof(*pprev) == sizeof(long)) || sizeof(*pprev) == sizeof(long long))) __compiletime_assert_134(); } while (0); do { *(volatile typeof(*pprev) *)&(*pprev) = ((struct hlist_bl_node *) ((uintptr_t)n | ((uintptr_t)*pprev & 1UL))); } while (0); } while (0);


}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_bl_add_behind(struct hlist_bl_node *n,
struct hlist_bl_node *prev)
{
n->next = prev->next;
n->pprev = &prev->next;
prev->next = n;

if (n->next)
n->next->pprev = &n->next;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __hlist_bl_del(struct hlist_bl_node *n)
{
struct hlist_bl_node *next = n->next;
struct hlist_bl_node **pprev = n->pprev;

do { if (__builtin_expect(!!((unsigned long)n & 1UL), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/list_bl.h"), "i" (120), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);


do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_135(void) ; if (!((sizeof(*pprev) == sizeof(char) || sizeof(*pprev) == sizeof(short) || sizeof(*pprev) == sizeof(int) || sizeof(*pprev) == sizeof(long)) || sizeof(*pprev) == sizeof(long long))) __compiletime_assert_135(); } while (0); do { *(volatile typeof(*pprev) *)&(*pprev) = ((struct hlist_bl_node *) ((unsigned long)next | ((unsigned long)*pprev & 1UL))); } while (0); } while (0);



if (next)
next->pprev = pprev;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_bl_del(struct hlist_bl_node *n)
{
__hlist_bl_del(n);
n->next = ((void *) 0x100 + 0);
n->pprev = ((void *) 0x122 + 0);
}
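/*
 * 0x100 + 0 and 0x122 + 0 are LIST_POISON1 and LIST_POISON2. Unlike the
 * RCU variants, plain hlist_bl_del() may poison both pointers because
 * the caller is expected to have excluded all concurrent readers.
 */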

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_bl_del_init(struct hlist_bl_node *n)
{
if (!hlist_bl_unhashed(n)) {
__hlist_bl_del(n);
INIT_HLIST_BL_NODE(n);
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_bl_lock(struct hlist_bl_head *b)
{
bit_spin_lock(0, (unsigned long *)b);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_bl_unlock(struct hlist_bl_head *b)
{
__bit_spin_unlock(0, (unsigned long *)b);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool hlist_bl_is_locked(struct hlist_bl_head *b)
{
return bit_spin_is_locked(0, (unsigned long *)b);
}
# 9 "./include/linux/rculist_bl.h" 2


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_bl_set_first_rcu(struct hlist_bl_head *h,
struct hlist_bl_node *n)
{
do { if (__builtin_expect(!!((unsigned long)n & 1UL), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/rculist_bl.h"), "i" (14), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
do { if (__builtin_expect(!!(((unsigned long)h->first & 1UL) != 1UL), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/rculist_bl.h"), "i" (16), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);

do { uintptr_t _r_a_p__v = (uintptr_t)((struct hlist_bl_node *)((unsigned long)n | 1UL)); ; if (__builtin_constant_p((struct hlist_bl_node *)((unsigned long)n | 1UL)) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_136(void) ; if (!((sizeof((h->first)) == sizeof(char) || sizeof((h->first)) == sizeof(short) || sizeof((h->first)) == sizeof(int) || sizeof((h->first)) == sizeof(long)) || sizeof((h->first)) == sizeof(long long))) __compiletime_assert_136(); } while (0); do { *(volatile typeof((h->first)) *)&((h->first)) = ((typeof(h->first))(_r_a_p__v)); } while (0); } while (0); else do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_137(void) ; if (!((sizeof(*&h->first) == sizeof(char) || sizeof(*&h->first) == sizeof(short) || sizeof(*&h->first) == sizeof(int) || sizeof(*&h->first) == sizeof(long)))) __compiletime_assert_137(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_138(void) ; if (!((sizeof(*&h->first) == sizeof(char) || sizeof(*&h->first) == sizeof(short) || sizeof(*&h->first) == sizeof(int) || sizeof(*&h->first) == sizeof(long)) || sizeof(*&h->first) == sizeof(long long))) __compiletime_assert_138(); } while (0); do { *(volatile typeof(*&h->first) *)&(*&h->first) = ((typeof(*((typeof(h->first))_r_a_p__v)) *)((typeof(h->first))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); } while (0);

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
{
return (struct hlist_bl_node *)
((unsigned long)({ typeof(*(h->first)) *__UNIQUE_ID_rcu139 = (typeof(*(h->first)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_140(void) ; if (!((sizeof((h->first)) == sizeof(char) || sizeof((h->first)) == sizeof(short) || sizeof((h->first)) == sizeof(int) || sizeof((h->first)) == sizeof(long)) || sizeof((h->first)) == sizeof(long long))) __compiletime_assert_140(); } while (0); (*(const volatile typeof( _Generic(((h->first)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((h->first)))) *)&((h->first))); }); do { } while (0 && (!((hlist_bl_is_locked(h)) || rcu_read_lock_held()))); ; ((typeof(*(h->first)) *)(__UNIQUE_ID_rcu139)); }) & ~1UL);
}
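/*
 * The ({ ... }) statement expression is
 * rcu_dereference_check(h->first, hlist_bl_is_locked(h)): a READ_ONCE()
 * of ->first (the _Generic ladder selects a volatile load of the right
 * width) with the lockdep check compiled away into the dead
 * "while (0 && ...)" shell, followed by masking off the lock bit as in
 * hlist_bl_first().
 */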
# 46 "./include/linux/rculist_bl.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_bl_del_rcu(struct hlist_bl_node *n)
{
__hlist_bl_del(n);
n->pprev = ((void *) 0x122 + 0);
}
# 71 "./include/linux/rculist_bl.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_bl_add_head_rcu(struct hlist_bl_node *n,
struct hlist_bl_head *h)
{
struct hlist_bl_node *first;


first = hlist_bl_first(h);

n->next = first;
if (first)
first->pprev = &n->next;
n->pprev = &h->first;


hlist_bl_set_first_rcu(h, n);
}
# 10 "./include/linux/dcache.h" 2




# 1 "./include/linux/lockref.h" 1
# 25 "./include/linux/lockref.h"
struct lockref {
union {



struct {
spinlock_t lock;
int count;
};
};
};
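/*
 * The union has collapsed to just the spinlock + count pair, which
 * suggests CONFIG_ARCH_USE_CMPXCHG_LOCKREF is disabled in this build;
 * the aligned u64 lock_count member used by the lockless fast path in
 * lib/lockref.c would otherwise appear here.
 */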

extern void lockref_get(struct lockref *);
extern int lockref_put_return(struct lockref *);
extern int lockref_get_not_zero(struct lockref *);
extern int lockref_put_not_zero(struct lockref *);
extern int lockref_get_or_lock(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);

extern void lockref_mark_dead(struct lockref *);
extern int lockref_get_not_dead(struct lockref *);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __lockref_is_dead(const struct lockref *l)
{
return ((int)l->count < 0);
}
# 15 "./include/linux/dcache.h" 2
# 1 "./include/linux/stringhash.h" 1






# 1 "./include/linux/hash.h" 1





# 1 "./arch/riscv/include/generated/uapi/asm/types.h" 1
# 7 "./include/linux/hash.h" 2
# 60 "./include/linux/hash.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 __hash_32_generic(u32 val)
{
return val * 0x61C88647;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 hash_32(u32 val, unsigned int bits)
{

return __hash_32_generic(val) >> (32 - bits);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) u32 hash_64_generic(u64 val, unsigned int bits)
{


return val * 0x61C8864680B583EBull >> (64 - bits);




}
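/*
 * 0x61C88647 and 0x61C8864680B583EBull are GOLDEN_RATIO_32 and
 * GOLDEN_RATIO_64, the 2^32/phi and 2^64/phi multipliers of the
 * kernel's Fibonacci hashing; taking the top "bits" bits of the product
 * yields the hash_32()/hash_64() value.
 */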

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 hash_ptr(const void *ptr, unsigned int bits)
{
return hash_64_generic((unsigned long)ptr, bits);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 hash32_ptr(const void *ptr)
{
unsigned long val = (unsigned long)ptr;


val ^= (val >> 32);

return (u32)val;
}
# 8 "./include/linux/stringhash.h" 2
# 42 "./include/linux/stringhash.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long
partial_name_hash(unsigned long c, unsigned long prevhash)
{
return (prevhash + (c << 4) + (c >> 4)) * 11;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int end_name_hash(unsigned long hash)
{
return hash_64_generic(hash, 32);
}
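/*
 * partial_name_hash() folds one character into a running hash and is
 * the byte-at-a-time path used when word-at-a-time string hashing
 * (CONFIG_DCACHE_WORD_ACCESS) is not in use; end_name_hash() then
 * compresses the accumulated unsigned long down to 32 bits.
 */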
# 66 "./include/linux/stringhash.h"
extern unsigned int __attribute__((__pure__)) full_name_hash(const void *salt, const char *, unsigned int);
# 77 "./include/linux/stringhash.h"
extern u64 __attribute__((__pure__)) hashlen_string(const void *salt, const char *name);
# 16 "./include/linux/dcache.h" 2


struct path;
struct vfsmount;
# 48 "./include/linux/dcache.h"
struct qstr {
union {
struct {
u32 hash; u32 len;
};
u64 hash_len;
};
const unsigned char *name;
};



extern const struct qstr empty_name;
extern const struct qstr slash_name;
extern const struct qstr dotdot_name;
# 81 "./include/linux/dcache.h"
struct dentry {

unsigned int d_flags;
seqcount_spinlock_t d_seq;
struct hlist_bl_node d_hash;
struct dentry *d_parent;
struct qstr d_name;
struct inode *d_inode;

unsigned char d_iname[32];


struct lockref d_lockref;
const struct dentry_operations *d_op;
struct super_block *d_sb;
unsigned long d_time;
void *d_fsdata;

union {
struct list_head d_lru;
wait_queue_head_t *d_wait;
};
struct list_head d_child;
struct list_head d_subdirs;



union {
struct hlist_node d_alias;
struct hlist_bl_node d_in_lookup_hash;
struct callback_head d_rcu;
} d_u;
};







enum dentry_d_lock_class
{
DENTRY_D_LOCK_NORMAL,
DENTRY_D_LOCK_NESTED
};

struct dentry_operations {
int (*d_revalidate)(struct dentry *, unsigned int);
int (*d_weak_revalidate)(struct dentry *, unsigned int);
int (*d_hash)(const struct dentry *, struct qstr *);
int (*d_compare)(const struct dentry *,
unsigned int, const char *, const struct qstr *);
int (*d_delete)(const struct dentry *);
int (*d_init)(struct dentry *);
void (*d_release)(struct dentry *);
void (*d_prune)(struct dentry *);
void (*d_iput)(struct dentry *, struct inode *);
char *(*d_dname)(struct dentry *, char *, int);
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(const struct path *, bool);
struct dentry *(*d_real)(struct dentry *, const struct inode *);
} __attribute__((__aligned__((1 << 6))));
# 215 "./include/linux/dcache.h"
extern seqlock_t rename_lock;




extern void d_instantiate(struct dentry *, struct inode *);
extern void d_instantiate_new(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
extern void d_delete(struct dentry *);
extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op);


extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
extern struct dentry * d_alloc_anon(struct super_block *);
extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
wait_queue_head_t *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
extern struct dentry *d_find_any_alias(struct inode *inode);
extern struct dentry * d_obtain_alias(struct inode *);
extern struct dentry * d_obtain_root(struct inode *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
extern void shrink_dcache_for_umount(struct super_block *);
extern void d_invalidate(struct dentry *);


extern struct dentry * d_make_root(struct inode *);


extern void d_genocide(struct dentry *);

extern void d_tmpfile(struct dentry *, struct inode *);

extern struct dentry *d_find_alias(struct inode *);
extern void d_prune_aliases(struct inode *);

extern struct dentry *d_find_alias_rcu(struct inode *);


extern int path_has_submounts(const struct path *);




extern void d_rehash(struct dentry *);

extern void d_add(struct dentry *, struct inode *);


extern void d_move(struct dentry *, struct dentry *);
extern void d_exchange(struct dentry *, struct dentry *);
extern struct dentry *d_ancestor(struct dentry *, struct dentry *);


extern struct dentry *d_lookup(const struct dentry *, const struct qstr *);
extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *);
extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *);
extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
const struct qstr *name, unsigned *seq);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned d_count(const struct dentry *dentry)
{
return dentry->d_lockref.count;
}




extern __attribute__((__format__(printf, 4, 5)))
char *dynamic_dname(struct dentry *, char *, int, const char *, ...);

extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);
extern char *d_path(const struct path *, char *, int);
extern char *dentry_path_raw(const struct dentry *, char *, int);
extern char *dentry_path(const struct dentry *, char *, int);
# 307 "./include/linux/dcache.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct dentry *dget_dlock(struct dentry *dentry)
{
if (dentry)
dentry->d_lockref.count++;
return dentry;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct dentry *dget(struct dentry *dentry)
{
if (dentry)
lockref_get(&dentry->d_lockref);
return dentry;
}
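/*
 * dget_dlock() assumes the caller already holds dentry->d_lockref.lock
 * and bumps the count directly; dget() goes through lockref_get(),
 * which takes the lock itself when no lockless fast path is available.
 */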

extern struct dentry *dget_parent(struct dentry *dentry);
# 330 "./include/linux/dcache.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int d_unhashed(const struct dentry *dentry)
{
return hlist_bl_unhashed(&dentry->d_hash);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int d_unlinked(const struct dentry *dentry)
{
return d_unhashed(dentry) && !((dentry) == (dentry)->d_parent);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cant_mount(const struct dentry *dentry)
{
return (dentry->d_flags & 0x00000100);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dont_mount(struct dentry *dentry)
{
spin_lock(&dentry->d_lockref.lock);
dentry->d_flags |= 0x00000100;
spin_unlock(&dentry->d_lockref.lock);
}

extern void __d_lookup_done(struct dentry *);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int d_in_lookup(const struct dentry *dentry)
{
return dentry->d_flags & 0x10000000;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void d_lookup_done(struct dentry *dentry)
{
if (__builtin_expect(!!(d_in_lookup(dentry)), 0)) {
spin_lock(&dentry->d_lockref.lock);
__d_lookup_done(dentry);
spin_unlock(&dentry->d_lockref.lock);
}
}

extern void dput(struct dentry *);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool d_managed(const struct dentry *dentry)
{
return dentry->d_flags & (0x00010000|0x00020000|0x00040000);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool d_mountpoint(const struct dentry *dentry)
{
return dentry->d_flags & 0x00010000;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned __d_entry_type(const struct dentry *dentry)
{
return dentry->d_flags & 0x00700000;
}
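/*
 * 0x00700000 is the DCACHE_ENTRY_TYPE mask. The d_is_*() helpers below
 * compare against DCACHE_MISS_TYPE (0x0), DCACHE_WHITEOUT_TYPE
 * (0x00100000), DCACHE_DIRECTORY_TYPE (0x00200000), DCACHE_AUTODIR_TYPE
 * (0x00300000), DCACHE_REGULAR_TYPE (0x00400000), DCACHE_SPECIAL_TYPE
 * (0x00500000) and DCACHE_SYMLINK_TYPE (0x00600000), whose names the
 * preprocessor has erased.
 */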

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool d_is_miss(const struct dentry *dentry)
{
return __d_entry_type(dentry) == 0x00000000;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool d_is_whiteout(const struct dentry *dentry)
{
return __d_entry_type(dentry) == 0x00100000;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool d_can_lookup(const struct dentry *dentry)
{
return __d_entry_type(dentry) == 0x00200000;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool d_is_autodir(const struct dentry *dentry)
{
return __d_entry_type(dentry) == 0x00300000;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool d_is_dir(const struct dentry *dentry)
{
return d_can_lookup(dentry) || d_is_autodir(dentry);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool d_is_symlink(const struct dentry *dentry)
{
return __d_entry_type(dentry) == 0x00600000;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool d_is_reg(const struct dentry *dentry)
{
return __d_entry_type(dentry) == 0x00400000;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool d_is_special(const struct dentry *dentry)
{
return __d_entry_type(dentry) == 0x00500000;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool d_is_file(const struct dentry *dentry)
{
return d_is_reg(dentry) || d_is_special(dentry);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool d_is_negative(const struct dentry *dentry)
{

return d_is_miss(dentry);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool d_flags_negative(unsigned flags)
{
return (flags & 0x00700000) == 0x00000000;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool d_is_positive(const struct dentry *dentry)
{
return !d_is_negative(dentry);
}
# 464 "./include/linux/dcache.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool d_really_is_negative(const struct dentry *dentry)
{
return dentry->d_inode == ((void *)0);
}
# 482 "./include/linux/dcache.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool d_really_is_positive(const struct dentry *dentry)
{
return dentry->d_inode != ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int simple_positive(const struct dentry *dentry)
{
return d_really_is_positive(dentry) && !d_unhashed(dentry);
}

extern void d_set_fallthru(struct dentry *dentry);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool d_is_fallthru(const struct dentry *dentry)
{
return dentry->d_flags & 0x01000000;
}
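/* 0x01000000 is DCACHE_FALLTHRU ("fall through to lower layer"), from the
 * same dcache.h flag block as the type constants noted earlier. */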


extern int sysctl_vfs_cache_pressure;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long vfs_pressure_ratio(unsigned long val)
{
return ( { typeof(val) quot = (val) / (100); typeof(val) rem = (val) % (100); (quot * (sysctl_vfs_cache_pressure)) + ((rem * (sysctl_vfs_cache_pressure)) / (100)); } );
}
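/*
 * Before expansion, vfs_pressure_ratio() is simply
 * mult_frac(val, sysctl_vfs_cache_pressure, 100); the statement-expression
 * above is mult_frac() from include/linux/kernel.h:
 *
 *   #define mult_frac(x, numer, denom) ({
 *           typeof(x) quot = (x) / (denom);
 *           typeof(x) rem  = (x) % (denom);
 *           (quot * (numer)) + ((rem * (numer)) / (denom)); })
 *
 * Splitting val into quotient and remainder keeps the scaled multiply from
 * overflowing the way a naive val * sysctl_vfs_cache_pressure / 100 can
 * for large val.
 */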
# 514 "./include/linux/dcache.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct inode *d_inode(const struct dentry *dentry)
{
return dentry->d_inode;
}
# 526 "./include/linux/dcache.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct inode *d_inode_rcu(const struct dentry *dentry)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_141(void) ; if (!((sizeof(dentry->d_inode) == sizeof(char) || sizeof(dentry->d_inode) == sizeof(short) || sizeof(dentry->d_inode) == sizeof(int) || sizeof(dentry->d_inode) == sizeof(long)) || sizeof(dentry->d_inode) == sizeof(long long))) __compiletime_assert_141(); } while (0); (*(const volatile typeof( _Generic((dentry->d_inode), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (dentry->d_inode))) *)&(dentry->d_inode)); });
}
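/*
 * d_inode_rcu() is just "return READ_ONCE(dentry->d_inode);" before
 * expansion: the do { ... } while (0) is compiletime_assert_rwonce_type()
 * rejecting objects wider than a machine word, and the _Generic() selects
 * an unqualified scalar type so the load is performed as a single volatile
 * access.
 */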
# 541 "./include/linux/dcache.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct inode *d_backing_inode(const struct dentry *upper)
{
struct inode *inode = upper->d_inode;

return inode;
}
# 558 "./include/linux/dcache.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct dentry *d_backing_dentry(struct dentry *upper)
{
return upper;
}
# 573 "./include/linux/dcache.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct dentry *d_real(struct dentry *dentry,
const struct inode *inode)
{
if (__builtin_expect(!!(dentry->d_flags & 0x04000000), 0))
return dentry->d_op->d_real(dentry, inode);
else
return dentry;
}
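/* 0x04000000 is DCACHE_OP_REAL, and __builtin_expect(..., 0) is the
 * expansion of unlikely(): only stacked filesystems such as overlayfs
 * install ->d_real(), so the plain-dentry branch is the expected fast
 * path. */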
# 589 "./include/linux/dcache.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct inode *d_real_inode(const struct dentry *dentry)
{

return d_backing_inode(d_real((struct dentry *) dentry, ((void *)0)));
}

struct name_snapshot {
struct qstr name;
unsigned char inline_name[32];
};
void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
void release_dentry_name_snapshot(struct name_snapshot *);
# 9 "./include/linux/fs.h" 2
# 1 "./include/linux/path.h" 1




struct dentry;
struct vfsmount;

struct path {
struct vfsmount *mnt;
struct dentry *dentry;
} ;
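/* The stray space before the ';' above is where __randomize_layout sat in
 * the original "struct path { ... } __randomize_layout;" - with structure
 * layout randomization disabled in this config, the attribute expands to
 * nothing. */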

extern void path_get(const struct path *);
extern void path_put(const struct path *);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int path_equal(const struct path *path1, const struct path *path2)
{
return path1->mnt == path2->mnt && path1->dentry == path2->dentry;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void path_put_init(struct path *path)
{
path_put(path);
*path = (struct path) { };
}
# 10 "./include/linux/fs.h" 2



# 1 "./include/linux/list_lru.h" 1
# 12 "./include/linux/list_lru.h"
# 1 "./include/linux/nodemask.h" 1
# 98 "./include/linux/nodemask.h"
typedef struct { unsigned long bits[((((1 << 0)) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))]; } nodemask_t;
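/* nodemask_t is "typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); }"
 * before expansion. Every "(1 << 0)" below is MAX_NUMNODES with
 * NODES_SHIFT == 0, i.e. a single-node (non-NUMA) configuration, so the
 * whole node bitmap fits in one unsigned long. */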
extern nodemask_t _unused_nodemask_arg_;
# 109 "./include/linux/nodemask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
{
return m ? (1 << 0) : 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
{
return m ? m->bits : ((void *)0);
}
# 128 "./include/linux/nodemask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __node_set(int node, volatile nodemask_t *dstp)
{
set_bit(node, dstp->bits);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __node_clear(int node, volatile nodemask_t *dstp)
{
clear_bit(node, dstp->bits);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
{
bitmap_fill(dstp->bits, nbits);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
{
bitmap_zero(dstp->bits, nbits);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __node_test_and_set(int node, nodemask_t *addr)
{
return test_and_set_bit(node, addr->bits);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __nodes_complement(nodemask_t *dstp,
const nodemask_t *srcp, unsigned int nbits)
{
bitmap_complement(dstp->bits, srcp->bits, nbits);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __nodes_equal(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __nodes_intersects(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __nodes_subset(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_full(srcp->bits, nbits);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __nodes_shift_right(nodemask_t *dstp,
const nodemask_t *srcp, int n, int nbits)
{
bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __nodes_shift_left(nodemask_t *dstp,
const nodemask_t *srcp, int n, int nbits)
{
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __first_node(const nodemask_t *srcp)
{
return __builtin_choose_expr(((!!(sizeof((typeof((int)((1 << 0))) *)1 == (typeof((int)(find_first_bit(srcp->bits, (1 << 0)))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)((1 << 0))) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(find_first_bit(srcp->bits, (1 << 0)))) * 0l)) : (int *)8))))), (((int)((1 << 0))) < ((int)(find_first_bit(srcp->bits, (1 << 0)))) ? ((int)((1 << 0))) : ((int)(find_first_bit(srcp->bits, (1 << 0))))), ({ typeof((int)((1 << 0))) __UNIQUE_ID___x142 = ((int)((1 << 0))); typeof((int)(find_first_bit(srcp->bits, (1 << 0)))) __UNIQUE_ID___y143 = ((int)(find_first_bit(srcp->bits, (1 << 0)))); ((__UNIQUE_ID___x142) < (__UNIQUE_ID___y143) ? (__UNIQUE_ID___x142) : (__UNIQUE_ID___y143)); }));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __next_node(int n, const nodemask_t *srcp)
{
return __builtin_choose_expr(((!!(sizeof((typeof((int)((1 << 0))) *)1 == (typeof((int)(find_next_bit(srcp->bits, (1 << 0), n+1))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)((1 << 0))) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(find_next_bit(srcp->bits, (1 << 0), n+1))) * 0l)) : (int *)8))))), (((int)((1 << 0))) < ((int)(find_next_bit(srcp->bits, (1 << 0), n+1))) ? ((int)((1 << 0))) : ((int)(find_next_bit(srcp->bits, (1 << 0), n+1)))), ({ typeof((int)((1 << 0))) __UNIQUE_ID___x144 = ((int)((1 << 0))); typeof((int)(find_next_bit(srcp->bits, (1 << 0), n+1))) __UNIQUE_ID___y145 = ((int)(find_next_bit(srcp->bits, (1 << 0), n+1))); ((__UNIQUE_ID___x144) < (__UNIQUE_ID___y145) ? (__UNIQUE_ID___x144) : (__UNIQUE_ID___y145)); }));
}
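/*
 * __first_node() and __next_node() read much more plainly before expansion:
 *
 *   return min_t(int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
 *   return min_t(int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n + 1));
 *
 * The __builtin_choose_expr()/__UNIQUE_ID machinery is min_t()'s
 * single-evaluation implementation from include/linux/minmax.h: when both
 * operands are integer constant expressions (the __is_constexpr() trick
 * built from the "(8 ? ... : (int *)8)" conditional) they are compared
 * directly, otherwise each operand is evaluated exactly once into a
 * uniquely named temporary. __first_unset_node() further down is the same
 * pattern over find_first_zero_bit().
 */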






int __next_node_in(int node, const nodemask_t *srcp);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void init_nodemask_of_node(nodemask_t *mask, int node)
{
__nodes_clear(&(*mask), (1 << 0));
__node_set((node), &(*mask));
}
# 299 "./include/linux/nodemask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __first_unset_node(const nodemask_t *maskp)
{
return __builtin_choose_expr(((!!(sizeof((typeof((int)((1 << 0))) *)1 == (typeof((int)(find_first_zero_bit(maskp->bits, (1 << 0)))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)((1 << 0))) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(find_first_zero_bit(maskp->bits, (1 << 0)))) * 0l)) : (int *)8))))), (((int)((1 << 0))) < ((int)(find_first_zero_bit(maskp->bits, (1 << 0)))) ? ((int)((1 << 0))) : ((int)(find_first_zero_bit(maskp->bits, (1 << 0))))), ({ typeof((int)((1 << 0))) __UNIQUE_ID___x146 = ((int)((1 << 0))); typeof((int)(find_first_zero_bit(maskp->bits, (1 << 0)))) __UNIQUE_ID___y147 = ((int)(find_first_zero_bit(maskp->bits, (1 << 0)))); ((__UNIQUE_ID___x146) < (__UNIQUE_ID___y147) ? (__UNIQUE_ID___x146) : (__UNIQUE_ID___y147)); }));

}
# 333 "./include/linux/nodemask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __nodemask_parse_user(const char *buf, int len,
nodemask_t *dstp, int nbits)
{
return bitmap_parse_user(buf, len, dstp->bits, nbits);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
{
return bitmap_parselist(buf, dstp->bits, nbits);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __node_remap(int oldbit,
const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
const nodemask_t *relmapp, int nbits)
{
bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
int sz, int nbits)
{
bitmap_fold(dstp->bits, origp->bits, sz, nbits);
}
# 391 "./include/linux/nodemask.h"
enum node_states {
N_POSSIBLE,
N_ONLINE,
N_NORMAL_MEMORY,



N_HIGH_MEMORY = N_NORMAL_MEMORY,

N_MEMORY,
N_CPU,
N_GENERIC_INITIATOR,
NR_NODE_STATES
};






extern nodemask_t node_states[NR_NODE_STATES];
# 465 "./include/linux/nodemask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int node_state(int node, enum node_states state)
{
return node == 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void node_set_state(int node, enum node_states state)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void node_clear_state(int node, enum node_states state)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int num_node_state(enum node_states state)
{
return 1;
}
# 500 "./include/linux/nodemask.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int node_random(const nodemask_t *mask)
{
return 0;
}
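/* These are the single-node stubs from include/linux/nodemask.h (built when
 * MAX_NUMNODES == 1): node_state() degenerates to "node == 0",
 * num_node_state() to 1 and node_random() to 0, and the state set/clear
 * helpers become no-ops. */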
# 532 "./include/linux/nodemask.h"
struct nodemask_scratch {
nodemask_t mask1;
nodemask_t mask2;
};
# 13 "./include/linux/list_lru.h" 2
# 1 "./include/linux/shrinker.h" 1
# 12 "./include/linux/shrinker.h"
struct shrink_control {
gfp_t gfp_mask;


int nid;






unsigned long nr_to_scan;






unsigned long nr_scanned;


struct mem_cgroup *memcg;
};
# 60 "./include/linux/shrinker.h"
struct shrinker {
unsigned long (*count_objects)(struct shrinker *,
struct shrink_control *sc);
unsigned long (*scan_objects)(struct shrinker *,
struct shrink_control *sc);

long batch;
int seeks;
unsigned flags;


struct list_head list;





atomic_long_t *nr_deferred;
};
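/*
 * Shrinker callback contract, per include/linux/shrinker.h:
 * ->count_objects() returns the number of freeable objects in the cache
 * (0 means "nothing to do right now"), and ->scan_objects() returns how
 * many it actually freed, or SHRINK_STOP (~0UL) to abort the scan. The
 * runs of blank lines inside shrink_control above are where the per-field
 * comments (gfp_mask, target nid, nr_to_scan/nr_scanned accounting) stood
 * before preprocessing.
 */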
# 91 "./include/linux/shrinker.h"
extern int prealloc_shrinker(struct shrinker *shrinker);
extern void register_shrinker_prepared(struct shrinker *shrinker);
extern int register_shrinker(struct shrinker *shrinker);
extern void unregister_shrinker(struct shrinker *shrinker);
extern void free_prealloced_shrinker(struct shrinker *shrinker);
extern void synchronize_shrinkers(void);
# 14 "./include/linux/list_lru.h" 2
# 1 "./include/linux/xarray.h" 1
# 15 "./include/linux/xarray.h"
# 1 "./include/linux/gfp.h" 1





# 1 "./include/linux/mmzone.h" 1
# 18 "./include/linux/mmzone.h"
# 1 "./include/linux/pageblock-flags.h" 1
# 18 "./include/linux/pageblock-flags.h"
enum pageblock_bits {
PB_migrate,
PB_migrate_end = PB_migrate + 3 - 1,

PB_migrate_skip,





NR_PAGEBLOCK_BITS
};
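/* PB_migrate .. PB_migrate_end (= PB_migrate + 3 - 1) reserve three bits
 * for a pageblock's MIGRATE_* type; PB_migrate_skip is compaction's "skip
 * this block" hint. */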
# 58 "./include/linux/pageblock-flags.h"
struct page;

unsigned long get_pfnblock_flags_mask(const struct page *page,
unsigned long pfn,
unsigned long mask);

void set_pfnblock_flags_mask(struct page *page,
unsigned long flags,
unsigned long pfn,
unsigned long mask);
# 19 "./include/linux/mmzone.h" 2



# 1 "./include/linux/page-flags.h" 1
# 100 "./include/linux/page-flags.h"
enum pageflags {
PG_locked,
PG_referenced,
PG_uptodate,
PG_dirty,
PG_lru,
PG_active,
PG_workingset,
PG_waiters,
PG_error,
PG_slab,
PG_owner_priv_1,
PG_arch_1,
PG_reserved,
PG_private,
PG_private_2,
PG_writeback,
PG_head,
PG_mappedtodisk,
PG_reclaim,
PG_swapbacked,
PG_unevictable,

PG_mlocked,
# 132 "./include/linux/page-flags.h"
PG_young,
PG_idle,


PG_arch_2,




__NR_PAGEFLAGS,

PG_readahead = PG_reclaim,


PG_checked = PG_owner_priv_1,


PG_swapcache = PG_owner_priv_1,





PG_fscache = PG_private_2,



PG_pinned = PG_owner_priv_1,

PG_savepinned = PG_dirty,

PG_foreign = PG_owner_priv_1,

PG_xen_remapped = PG_owner_priv_1,


PG_slob_free = PG_private,


PG_double_map = PG_workingset,
# 183 "./include/linux/page-flags.h"
PG_isolated = PG_reclaim,


PG_reported = PG_uptodate,
};
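/* Everything from PG_readahead down reuses a bit defined above
 * __NR_PAGEFLAGS: aliased states such as PG_readahead vs PG_reclaim or
 * PG_swapcache vs PG_owner_priv_1 are never needed on the same page at the
 * same time, which is what keeps page->flags within a single word. */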
# 245 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const struct page *page_fixed_fake_head(const struct page *page)
{
return page;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool hugetlb_free_vmemmap_enabled(void)
{
return false;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int page_is_fake_head(struct page *page)
{
return page_fixed_fake_head(page) != page;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long _compound_head(const struct page *page)
{
unsigned long head = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_148(void) ; if (!((sizeof(page->compound_head) == sizeof(char) || sizeof(page->compound_head) == sizeof(short) || sizeof(page->compound_head) == sizeof(int) || sizeof(page->compound_head) == sizeof(long)) || sizeof(page->compound_head) == sizeof(long long))) __compiletime_assert_148(); } while (0); (*(const volatile typeof( _Generic((page->compound_head), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (page->compound_head))) *)&(page->compound_head)); });

if (__builtin_expect(!!(head & 1), 0))
return head - 1;
return (unsigned long)page_fixed_fake_head(page);
}
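/*
 * compound_head encoding: a tail page stores its head page's address with
 * bit 0 set in page->compound_head, hence the "head - 1" above; for a head
 * or order-0 page the bit is clear. page_fixed_fake_head() is the identity
 * stub here because the HugeTLB vmemmap optimisation is compiled out
 * (hugetlb_free_vmemmap_enabled() returns false above).
 */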
# 300 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageTail(struct page *page)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_149(void) ; if (!((sizeof(page->compound_head) == sizeof(char) || sizeof(page->compound_head) == sizeof(short) || sizeof(page->compound_head) == sizeof(int) || sizeof(page->compound_head) == sizeof(long)) || sizeof(page->compound_head) == sizeof(long long))) __compiletime_assert_149(); } while (0); (*(const volatile typeof( _Generic((page->compound_head), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (page->compound_head))) *)&(page->compound_head)); }) & 1 || page_is_fake_head(page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageCompound(struct page *page)
{
return arch_test_bit(PG_head, &page->flags) ||
({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_150(void) ; if (!((sizeof(page->compound_head) == sizeof(char) || sizeof(page->compound_head) == sizeof(short) || sizeof(page->compound_head) == sizeof(int) || sizeof(page->compound_head) == sizeof(long)) || sizeof(page->compound_head) == sizeof(long long))) __compiletime_assert_150(); } while (0); (*(const volatile typeof( _Generic((page->compound_head), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (page->compound_head))) *)&(page->compound_head)); }) & 1;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int PagePoisoned(const struct page *page)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_151(void) ; if (!((sizeof(page->flags) == sizeof(char) || sizeof(page->flags) == sizeof(short) || sizeof(page->flags) == sizeof(int) || sizeof(page->flags) == sizeof(long)) || sizeof(page->flags) == sizeof(long long))) __compiletime_assert_151(); } while (0); (*(const volatile typeof( _Generic((page->flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (page->flags))) *)&(page->flags)); }) == -1l;
}


void page_init_poison(struct page *page, size_t size);






static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
struct page *page = &folio->page;

do { if (__builtin_expect(!!(PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (329), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0);
do { if (__builtin_expect(!!(n > 0 && !arch_test_bit(PG_head, &page->flags)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "n > 0 && !arch_test_bit(PG_head, &page->flags)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (330), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0);
return &page[n].flags;
}
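/*
 * The two guard statements in folio_flags() are VM_BUG_ON_PGFLAGS()
 * expansions (VM_BUG_ON_PAGE() under CONFIG_DEBUG_VM_PGFLAGS): on RISC-V,
 * BUG() plants an "ebreak" and records file/line in the __bug_table
 * section through inline asm, which is where all the .pushsection
 * boilerplate comes from. Before expansion:
 *
 *   VM_BUG_ON_PGFLAGS(PageTail(page), page);
 *   VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
 */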
# 483 "./include/linux/page-flags.h"
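/*
 * The very long single-line definitions that follow are the expansions of
 * the TESTPAGEFLAG()/PAGEFLAG()/__PAGEFLAG() macro families from
 * include/linux/page-flags.h: each invocation emits a matched set of
 * folio_test_X()/PageX(), folio_set_X()/SetPageX() and
 * folio_clear_X()/ClearPageX() accessors (plus test-and-set and
 * test-and-clear variants) for one PG_* bit. The compound statement
 * wrapped around each page argument is the PF_* policy (PF_ANY, PF_HEAD,
 * PF_ONLY_HEAD, PF_NO_TAIL or PF_NO_COMPOUND) that decides whether the
 * operation is redirected to the compound head, combined with a
 * PagePoisoned() sanity check.
 */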
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_locked(struct folio *folio) { return arch_test_bit(PG_locked, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageLocked(struct page *page) { return arch_test_bit(PG_locked, &({ do { if (__builtin_expect(!!(0 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (483), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (483), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __folio_set_locked(struct folio *folio) { arch___set_bit(PG_locked, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __SetPageLocked(struct page *page) { arch___set_bit(PG_locked, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (483), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (483), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __folio_clear_locked(struct 
folio *folio) { arch___clear_bit(PG_locked, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __ClearPageLocked(struct page *page) { arch___clear_bit(PG_locked, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (483), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (483), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_waiters(struct folio *folio) { return arch_test_bit(PG_waiters, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageWaiters(struct page *page) { return arch_test_bit(PG_waiters, &({ do { if (__builtin_expect(!!(PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (484), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (484), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_waiters(struct folio *folio) { set_bit(PG_waiters, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageWaiters(struct page *page) { set_bit(PG_waiters, &({ do { if (__builtin_expect(!!(PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (484), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (484), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_waiters(struct folio *folio) { clear_bit(PG_waiters, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageWaiters(struct page *page) { clear_bit(PG_waiters, 
&({ do { if (__builtin_expect(!!(PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (484), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (484), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_error(struct folio *folio) { return arch_test_bit(PG_error, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageError(struct page *page) { return arch_test_bit(PG_error, &({ do { if (__builtin_expect(!!(0 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (485), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (485), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_error(struct folio *folio) { set_bit(PG_error, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageError(struct page *page) { set_bit(PG_error, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (485), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (485), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_error(struct folio *folio) { 
clear_bit(PG_error, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageError(struct page *page) { clear_bit(PG_error, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (485), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (485), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_clear_error(struct folio *folio) { return test_and_clear_bit(PG_error, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestClearPageError(struct page *page) { return test_and_clear_bit(PG_error, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (485), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (485), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_referenced(struct folio *folio) { return arch_test_bit(PG_referenced, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageReferenced(struct page *page) { return arch_test_bit(PG_referenced, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (486), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_referenced(struct folio *folio) { set_bit(PG_referenced, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageReferenced(struct page *page) { set_bit(PG_referenced, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (486), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_referenced(struct folio *folio) { clear_bit(PG_referenced, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageReferenced(struct page *page) { clear_bit(PG_referenced, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (486), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_clear_referenced(struct folio *folio) { return test_and_clear_bit(PG_referenced, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestClearPageReferenced(struct page *page) { return test_and_clear_bit(PG_referenced, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (487), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __folio_set_referenced(struct folio *folio) { arch___set_bit(PG_referenced, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __SetPageReferenced(struct page *page) { arch___set_bit(PG_referenced, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (488), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_dirty(struct folio *folio) { return arch_test_bit(PG_dirty, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageDirty(struct page *page) { return arch_test_bit(PG_dirty, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (489), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_dirty(struct folio *folio) { set_bit(PG_dirty, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageDirty(struct page *page) { set_bit(PG_dirty, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (489), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_dirty(struct folio *folio) { clear_bit(PG_dirty, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageDirty(struct page *page) { clear_bit(PG_dirty, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (489), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) 
__attribute__((__always_inline__)) bool folio_test_set_dirty(struct folio *folio) { return test_and_set_bit(PG_dirty, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestSetPageDirty(struct page *page) { return test_and_set_bit(PG_dirty, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (489), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_clear_dirty(struct folio *folio) { return test_and_clear_bit(PG_dirty, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestClearPageDirty(struct page *page) { return test_and_clear_bit(PG_dirty, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (489), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __folio_clear_dirty(struct folio *folio) { arch___clear_bit(PG_dirty, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __ClearPageDirty(struct page *page) { arch___clear_bit(PG_dirty, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (490), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_lru(struct folio *folio) { return arch_test_bit(PG_lru, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageLRU(struct page *page) { return arch_test_bit(PG_lru, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (491), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_lru(struct folio *folio) { set_bit(PG_lru, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageLRU(struct page *page) { set_bit(PG_lru, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (491), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_lru(struct folio *folio) { clear_bit(PG_lru, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageLRU(struct page *page) { clear_bit(PG_lru, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (491), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) 
void __folio_clear_lru(struct folio *folio) { arch___clear_bit(PG_lru, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __ClearPageLRU(struct page *page) { arch___clear_bit(PG_lru, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (491), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_clear_lru(struct folio *folio) { return test_and_clear_bit(PG_lru, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestClearPageLRU(struct page *page) { return test_and_clear_bit(PG_lru, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (492), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_active(struct folio *folio) { return arch_test_bit(PG_active, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageActive(struct page *page) { return arch_test_bit(PG_active, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (493), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_active(struct folio *folio) { set_bit(PG_active, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageActive(struct page *page) { set_bit(PG_active, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (493), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_active(struct folio *folio) { clear_bit(PG_active, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageActive(struct page *page) { clear_bit(PG_active, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (493), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) 
__attribute__((__always_inline__)) void __folio_clear_active(struct folio *folio) { arch___clear_bit(PG_active, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __ClearPageActive(struct page *page) { arch___clear_bit(PG_active, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (493), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_clear_active(struct folio *folio) { return test_and_clear_bit(PG_active, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestClearPageActive(struct page *page) { return test_and_clear_bit(PG_active, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (494), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_workingset(struct folio *folio) { return arch_test_bit(PG_workingset, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageWorkingset(struct page *page) { return arch_test_bit(PG_workingset, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (495), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_workingset(struct folio *folio) { set_bit(PG_workingset, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageWorkingset(struct page *page) { set_bit(PG_workingset, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (495), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_workingset(struct folio *folio) { clear_bit(PG_workingset, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageWorkingset(struct page *page) { clear_bit(PG_workingset, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (495), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_clear_workingset(struct folio *folio) { return test_and_clear_bit(PG_workingset, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestClearPageWorkingset(struct page *page) { return test_and_clear_bit(PG_workingset, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (496), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); }
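/*
 * The inline helpers in this region are the preprocessed expansions of the
 * kernel's PAGEFLAG()/TESTPAGEFLAG() macro family; the embedded __bug_table
 * operands record the originating header ("include/linux/page-flags.h") and
 * its line numbers (491 onwards). PG_slab and PG_slob_free below get only
 * the non-atomic arch___set_bit()/arch___clear_bit() variants
 * (__SetPageSlab() etc.), and their modifying paths VM_BUG_ON tail pages.
 */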
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_slab(struct folio *folio) { return arch_test_bit(PG_slab, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageSlab(struct page *page) { return arch_test_bit(PG_slab, &({ do { if (__builtin_expect(!!(0 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (497), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (497), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __folio_set_slab(struct folio *folio) { arch___set_bit(PG_slab, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __SetPageSlab(struct page *page) { arch___set_bit(PG_slab, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (497), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (497), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __folio_clear_slab(struct folio *folio) { 
arch___clear_bit(PG_slab, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __ClearPageSlab(struct page *page) { arch___clear_bit(PG_slab, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (497), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (497), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_slob_free(struct folio *folio) { return arch_test_bit(PG_slob_free, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageSlobFree(struct page *page) { return arch_test_bit(PG_slob_free, &({ do { if (__builtin_expect(!!(0 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (498), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (498), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __folio_set_slob_free(struct folio *folio) { arch___set_bit(PG_slob_free, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __SetPageSlobFree(struct page *page) { arch___set_bit(PG_slob_free, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (498), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (498), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void 
__folio_clear_slob_free(struct folio *folio) { arch___clear_bit(PG_slob_free, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __ClearPageSlobFree(struct page *page) { arch___clear_bit(PG_slob_free, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (498), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (498), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); }
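/*
 * PG_checked accessors: unlike the helpers above, which redirect through
 * _compound_head(), these operate on the passed page itself, and the
 * set/clear paths VM_BUG_ON compound pages ("1 && PageCompound(page)").
 */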
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_checked(struct folio *folio) { return arch_test_bit(PG_checked, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageChecked(struct page *page) { return arch_test_bit(PG_checked, &({ do { if (__builtin_expect(!!(0 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (499), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (499), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_checked(struct folio *folio) { set_bit(PG_checked, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageChecked(struct page *page) { set_bit(PG_checked, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (499), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (499), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_checked(struct folio *folio) { clear_bit(PG_checked, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageChecked(struct 
page *page) { clear_bit(PG_checked, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (499), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (499), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); }


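/*
 * Xen page flags: PG_pinned, PG_savepinned, PG_foreign and PG_xen_remapped,
 * all using the same no-compound-page policy as PG_checked above.
 */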
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_pinned(struct folio *folio) { return arch_test_bit(PG_pinned, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PagePinned(struct page *page) { return arch_test_bit(PG_pinned, &({ do { if (__builtin_expect(!!(0 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (502), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (502), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_pinned(struct folio *folio) { set_bit(PG_pinned, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPagePinned(struct page *page) { set_bit(PG_pinned, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (502), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (502), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_pinned(struct folio *folio) { clear_bit(PG_pinned, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPagePinned(struct page *page) 
{ clear_bit(PG_pinned, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (502), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (502), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_set_pinned(struct folio *folio) { return test_and_set_bit(PG_pinned, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestSetPagePinned(struct page *page) { return test_and_set_bit(PG_pinned, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (503), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (503), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_clear_pinned(struct folio *folio) { return test_and_clear_bit(PG_pinned, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestClearPagePinned(struct page *page) { return test_and_clear_bit(PG_pinned, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (503), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (503), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_savepinned(struct folio *folio) { return arch_test_bit(PG_savepinned, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageSavePinned(struct page *page) { return arch_test_bit(PG_savepinned, &({ do { if (__builtin_expect(!!(0 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (504), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (504), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_savepinned(struct folio *folio) { set_bit(PG_savepinned, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageSavePinned(struct page *page) { set_bit(PG_savepinned, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (504), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (504), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_savepinned(struct folio *folio) { clear_bit(PG_savepinned, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) 
void ClearPageSavePinned(struct page *page) { clear_bit(PG_savepinned, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (504), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (504), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); };
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_foreign(struct folio *folio) { return arch_test_bit(PG_foreign, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageForeign(struct page *page) { return arch_test_bit(PG_foreign, &({ do { if (__builtin_expect(!!(0 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (505), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (505), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_foreign(struct folio *folio) { set_bit(PG_foreign, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageForeign(struct page *page) { set_bit(PG_foreign, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (505), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (505), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_foreign(struct folio *folio) { clear_bit(PG_foreign, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageForeign(struct 
page *page) { clear_bit(PG_foreign, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (505), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (505), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); };
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_xen_remapped(struct folio *folio) { return arch_test_bit(PG_xen_remapped, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageXenRemapped(struct page *page) { return arch_test_bit(PG_xen_remapped, &({ do { if (__builtin_expect(!!(0 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (506), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (506), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_xen_remapped(struct folio *folio) { set_bit(PG_xen_remapped, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageXenRemapped(struct page *page) { set_bit(PG_xen_remapped, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (506), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (506), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_xen_remapped(struct folio *folio) { clear_bit(PG_xen_remapped, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) 
__attribute__((__always_inline__)) void ClearPageXenRemapped(struct page *page) { clear_bit(PG_xen_remapped, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (506), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (506), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_clear_xen_remapped(struct folio *folio) { return test_and_clear_bit(PG_xen_remapped, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestClearPageXenRemapped(struct page *page) { return test_and_clear_bit(PG_xen_remapped, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (507), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (507), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); }

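/*
 * PG_reserved accessors, including the non-atomic __SetPageReserved() and
 * __ClearPageReserved() variants.
 */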
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_reserved(struct folio *folio) { return arch_test_bit(PG_reserved, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageReserved(struct page *page) { return arch_test_bit(PG_reserved, &({ do { if (__builtin_expect(!!(0 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (509), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (509), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_reserved(struct folio *folio) { set_bit(PG_reserved, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageReserved(struct page *page) { set_bit(PG_reserved, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (509), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (509), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_reserved(struct folio *folio) { clear_bit(PG_reserved, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void 
ClearPageReserved(struct page *page) { clear_bit(PG_reserved, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (509), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (509), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __folio_clear_reserved(struct folio *folio) { arch___clear_bit(PG_reserved, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __ClearPageReserved(struct page *page) { arch___clear_bit(PG_reserved, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (510), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (510), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __folio_set_reserved(struct folio *folio) { arch___set_bit(PG_reserved, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __SetPageReserved(struct page *page) { arch___set_bit(PG_reserved, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (511), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (511), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); }
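/*
 * The helpers above are clang's preprocessed expansion of
 * PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) plus its
 * __CLEARPAGEFLAG()/__SETPAGEFLAG() variants from
 * include/linux/page-flags.h; the "i" (509)..(511) asm operands are that
 * header's __LINE__ values.  The attribute soup is the kernel's
 * "static __always_inline" after compiler_types.h rewrites inline/notrace
 * (patchable_function_entry(0, 0) comes from building with
 * -fpatchable-function-entry=8), and the ebreak/__bug_table asm is the
 * RISC-V BUG() encoding emitted by VM_BUG_ON_PAGE().
 */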
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_swapbacked(struct folio *folio) { return arch_test_bit(PG_swapbacked, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageSwapBacked(struct page *page) { return arch_test_bit(PG_swapbacked, &({ do { if (__builtin_expect(!!(0 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (512), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (512), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_swapbacked(struct folio *folio) { set_bit(PG_swapbacked, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageSwapBacked(struct page *page) { set_bit(PG_swapbacked, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (512), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (512), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void 
folio_clear_swapbacked(struct folio *folio) { clear_bit(PG_swapbacked, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageSwapBacked(struct page *page) { clear_bit(PG_swapbacked, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (512), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (512), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __folio_clear_swapbacked(struct folio *folio) { arch___clear_bit(PG_swapbacked, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __ClearPageSwapBacked(struct page *page) { arch___clear_bit(PG_swapbacked, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (513), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (513), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __folio_set_swapbacked(struct folio *folio) { arch___set_bit(PG_swapbacked, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __SetPageSwapBacked(struct page *page) { arch___set_bit(PG_swapbacked, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (514), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (514), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); }
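/*
 * Likewise PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) with its
 * __CLEARPAGEFLAG()/__SETPAGEFLAG() variants.  The PF_NO_TAIL policy
 * shows up as the VM_BUG_ON_PAGE(PageTail()) assertion followed by the
 * _compound_head() lookup before the bit operation.
 */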






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_private(struct folio *folio) { return arch_test_bit(PG_private, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PagePrivate(struct page *page) { return arch_test_bit(PG_private, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (521), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_private(struct folio *folio) { set_bit(PG_private, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPagePrivate(struct page *page) { set_bit(PG_private, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (521), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_private(struct folio *folio) { clear_bit(PG_private, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPagePrivate(struct page *page) { clear_bit(PG_private, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (521), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_private_2(struct folio *folio) { return arch_test_bit(PG_private_2, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PagePrivate2(struct page *page) { return arch_test_bit(PG_private_2, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (522), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_private_2(struct folio *folio) { set_bit(PG_private_2, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPagePrivate2(struct page *page) { set_bit(PG_private_2, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (522), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_private_2(struct folio *folio) { clear_bit(PG_private_2, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPagePrivate2(struct page *page) { clear_bit(PG_private_2, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (522), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_set_private_2(struct folio *folio) { return test_and_set_bit(PG_private_2, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestSetPagePrivate2(struct page 
*page) { return test_and_set_bit(PG_private_2, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (522), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_clear_private_2(struct folio *folio) { return test_and_clear_bit(PG_private_2, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestClearPagePrivate2(struct page *page) { return test_and_clear_bit(PG_private_2, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (522), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_owner_priv_1(struct folio *folio) { return arch_test_bit(PG_owner_priv_1, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageOwnerPriv1(struct page *page) { return arch_test_bit(PG_owner_priv_1, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (523), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_owner_priv_1(struct folio *folio) { set_bit(PG_owner_priv_1, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageOwnerPriv1(struct page *page) { set_bit(PG_owner_priv_1, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (523), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_owner_priv_1(struct folio *folio) { clear_bit(PG_owner_priv_1, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageOwnerPriv1(struct page *page) { clear_bit(PG_owner_priv_1, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (523), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_clear_owner_priv_1(struct folio *folio) { return test_and_clear_bit(PG_owner_priv_1, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestClearPageOwnerPriv1(struct page *page) { return test_and_clear_bit(PG_owner_priv_1, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (524), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); }
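/*
 * PAGEFLAG(Private, private, PF_ANY), PAGEFLAG(Private2, private_2,
 * PF_ANY) + TESTSCFLAG(), and PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
 * + TESTCLEARFLAG().  PF_ANY operates on the page as given, so only the
 * PagePoisoned() sanity check is emitted, with no compound-head
 * translation.
 */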





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_writeback(struct folio *folio) { return arch_test_bit(PG_writeback, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageWriteback(struct page *page) { return arch_test_bit(PG_writeback, &({ do { if (__builtin_expect(!!(0 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (530), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (530), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_set_writeback(struct folio *folio) { return test_and_set_bit(PG_writeback, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestSetPageWriteback(struct page *page) { return test_and_set_bit(PG_writeback, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (531), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (531), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_clear_writeback(struct folio *folio) { return test_and_clear_bit(PG_writeback, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestClearPageWriteback(struct page *page) { return test_and_clear_bit(PG_writeback, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (531), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (531), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_mappedtodisk(struct folio *folio) { return arch_test_bit(PG_mappedtodisk, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageMappedToDisk(struct page *page) { return arch_test_bit(PG_mappedtodisk, &({ do { if (__builtin_expect(!!(0 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (532), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (532), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_mappedtodisk(struct folio *folio) { set_bit(PG_mappedtodisk, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageMappedToDisk(struct page *page) { set_bit(PG_mappedtodisk, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (532), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (532), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) 
void folio_clear_mappedtodisk(struct folio *folio) { clear_bit(PG_mappedtodisk, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageMappedToDisk(struct page *page) { clear_bit(PG_mappedtodisk, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (532), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (532), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); }
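/*
 * TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL) + TESTSCFLAG() -- note
 * that PG_writeback deliberately has no plain set/clear helpers, only
 * the atomic test-and-set/test-and-clear forms -- followed by
 * PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL).
 */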


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_reclaim(struct folio *folio) { return arch_test_bit(PG_reclaim, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageReclaim(struct page *page) { return arch_test_bit(PG_reclaim, &({ do { if (__builtin_expect(!!(0 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (535), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (535), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_reclaim(struct folio *folio) { set_bit(PG_reclaim, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageReclaim(struct page *page) { set_bit(PG_reclaim, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (535), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (535), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_reclaim(struct folio 
*folio) { clear_bit(PG_reclaim, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageReclaim(struct page *page) { clear_bit(PG_reclaim, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (535), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (535), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_clear_reclaim(struct folio *folio) { return test_and_clear_bit(PG_reclaim, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestClearPageReclaim(struct page *page) { return test_and_clear_bit(PG_reclaim, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (536), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (536), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_readahead(struct folio *folio) { return arch_test_bit(PG_readahead, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageReadahead(struct page *page) { return arch_test_bit(PG_readahead, &({ do { if (__builtin_expect(!!(0 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (537), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (537), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_readahead(struct folio *folio) { set_bit(PG_readahead, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageReadahead(struct page *page) { set_bit(PG_readahead, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (537), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (537), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_readahead(struct folio *folio) { clear_bit(PG_readahead, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void 
ClearPageReadahead(struct page *page) { clear_bit(PG_readahead, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (537), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (537), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_clear_readahead(struct folio *folio) { return test_and_clear_bit(PG_readahead, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestClearPageReadahead(struct page *page) { return test_and_clear_bit(PG_readahead, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (538), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (538), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); }
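/*
 * PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL) + TESTCLEARFLAG(), then
 * PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND) + TESTCLEARFLAG().
 */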
# 547 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_test_highmem(const struct folio *folio) { return false; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int PageHighMem(const struct page *page) { return 0; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_set_highmem(struct folio *folio) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void SetPageHighMem(struct page *page) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_clear_highmem(struct folio *folio) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ClearPageHighMem(struct page *page) { }
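/*
 * CONFIG_HIGHMEM is not set in this rv64 configuration, so the
 * PageHighMem() family is the PAGEFLAG_FALSE() stub: constant-false
 * tests and empty set/clear bodies (and, unlike the real accessors,
 * no __always_inline).
 */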



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_swapcache(struct folio *folio)
{
return folio_test_swapbacked(folio) &&
arch_test_bit(PG_swapcache, folio_flags(folio, 0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool PageSwapCache(struct page *page)
{
return folio_test_swapcache((_Generic((page), const struct page *: (const struct folio *)_compound_head(page), struct page *: (struct folio *)_compound_head(page))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_swapcache(struct folio *folio) { set_bit(PG_swapcache, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageSwapCache(struct page *page) { set_bit(PG_swapcache, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (562), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (562), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_swapcache(struct folio *folio) { clear_bit(PG_swapcache, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageSwapCache(struct page *page) { clear_bit(PG_swapcache, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (563), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (563), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); }
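/*
 * folio_test_swapcache()/PageSwapCache() are open-coded in the header
 * rather than macro-generated: a page only counts as swap-cached if
 * PG_swapbacked is also set.  The accompanying set/clear helpers are
 * ordinary SETPAGEFLAG()/CLEARPAGEFLAG() expansions under PF_NO_TAIL.
 */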




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_unevictable(struct folio *folio) { return arch_test_bit(PG_unevictable, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageUnevictable(struct page *page) { return arch_test_bit(PG_unevictable, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (568), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_unevictable(struct folio *folio) { set_bit(PG_unevictable, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageUnevictable(struct page *page) { set_bit(PG_unevictable, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (568), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_unevictable(struct folio *folio) { clear_bit(PG_unevictable, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageUnevictable(struct page *page) { clear_bit(PG_unevictable, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (568), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __folio_clear_unevictable(struct folio *folio) { arch___clear_bit(PG_unevictable, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __ClearPageUnevictable(struct page *page) { arch___clear_bit(PG_unevictable, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (569), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_clear_unevictable(struct folio *folio) { return test_and_clear_bit(PG_unevictable, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestClearPageUnevictable(struct page *page) { return test_and_clear_bit(PG_unevictable, &({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (570), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); })->flags); }
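/*
 * PAGEFLAG(Unevictable, unevictable, PF_HEAD) + __CLEARPAGEFLAG() +
 * TESTCLEARFLAG().  PF_HEAD resolves the compound head unconditionally,
 * which is why no PageTail() assertion appears in these bodies.
 */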


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_mlocked(struct folio *folio) { return arch_test_bit(PG_mlocked, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageMlocked(struct page *page) { return arch_test_bit(PG_mlocked, &({ do { if (__builtin_expect(!!(0 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (573), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (573), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_mlocked(struct folio *folio) { set_bit(PG_mlocked, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageMlocked(struct page *page) { set_bit(PG_mlocked, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (573), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (573), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_mlocked(struct folio *folio) { clear_bit(PG_mlocked, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageMlocked(struct page *page) { clear_bit(PG_mlocked, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (573), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (573), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __folio_clear_mlocked(struct folio *folio) { arch___clear_bit(PG_mlocked, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __ClearPageMlocked(struct page *page) { arch___clear_bit(PG_mlocked, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (574), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (574), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_set_mlocked(struct folio *folio) { return test_and_set_bit(PG_mlocked, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestSetPageMlocked(struct page *page) { return test_and_set_bit(PG_mlocked, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (575), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (575), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_clear_mlocked(struct folio *folio) { return test_and_clear_bit(PG_mlocked, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestClearPageMlocked(struct page *page) { return test_and_clear_bit(PG_mlocked, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (575), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (575), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); }
# 584 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_test_uncached(const struct folio *folio) { return false; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int PageUncached(const struct page *page) { return 0; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_set_uncached(struct folio *folio) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void SetPageUncached(struct page *page) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_clear_uncached(struct folio *folio) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ClearPageUncached(struct page *page) { }
# 597 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_test_hwpoison(const struct folio *folio) { return false; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int PageHWPoison(const struct page *page) { return 0; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_set_hwpoison(struct folio *folio) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void SetPageHWPoison(struct page *page) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_clear_hwpoison(struct folio *folio) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ClearPageHWPoison(struct page *page) { }




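/*
 * PG_young / PG_idle accessors, presumably for page idle tracking;
 * these operate on the passed-in page directly after a PagePoisoned()
 * check rather than resolving a compound head first.
 */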
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_young(struct folio *folio) { return arch_test_bit(PG_young, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageYoung(struct page *page) { return arch_test_bit(PG_young, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (602), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_young(struct folio *folio) { set_bit(PG_young, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageYoung(struct page *page) { set_bit(PG_young, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (603), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_clear_young(struct folio *folio) { return test_and_clear_bit(PG_young, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestClearPageYoung(struct page *page) { return test_and_clear_bit(PG_young, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (604), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_idle(struct folio *folio) { return arch_test_bit(PG_idle, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageIdle(struct page *page) { return arch_test_bit(PG_idle, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (605), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_idle(struct folio *folio) { set_bit(PG_idle, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageIdle(struct page *page) { set_bit(PG_idle, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (605), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_idle(struct folio *folio) { clear_bit(PG_idle, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageIdle(struct page *page) { clear_bit(PG_idle, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (605), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); }





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_test_skip_kasan_poison(const struct folio *folio) { return false; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int PageSkipKASanPoison(const struct page *page) { return 0; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_set_skip_kasan_poison(struct folio *folio) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void SetPageSkipKASanPoison(struct page *page) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_clear_skip_kasan_poison(struct folio *folio) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ClearPageSkipKASanPoison(struct page *page) { }
# 620 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_reported(struct folio *folio) { return arch_test_bit(PG_reported, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageReported(struct page *page) { return arch_test_bit(PG_reported, &({ do { if (__builtin_expect(!!(0 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (620), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (620), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __folio_set_reported(struct folio *folio) { arch___set_bit(PG_reported, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __SetPageReported(struct page *page) { arch___set_bit(PG_reported, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (620), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (620), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __folio_clear_reported(struct folio *folio) { arch___clear_bit(PG_reported, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __ClearPageReported(struct page *page) { arch___clear_bit(PG_reported, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (620), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (620), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); }
# 644 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageMappingFlags(struct page *page)
{
return ((unsigned long)page->mapping & (0x1 | 0x2)) != 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_anon(struct folio *folio)
{
return ((unsigned long)folio->mapping & 0x1) != 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool PageAnon(struct page *page)
{
return folio_test_anon((_Generic((page), const struct page *: (const struct folio *)_compound_head(page), struct page *: (struct folio *)_compound_head(page))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int __PageMovable(struct page *page)
{
return ((unsigned long)page->mapping & (0x1 | 0x2)) ==
0x2;
}
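/*
 * The low two bits of ->mapping are flag bits rather than pointer bits:
 * 0x1 marks an anonymous mapping and 0x2 a movable (non-LRU) page;
 * presumably PAGE_MAPPING_ANON and PAGE_MAPPING_MOVABLE in the
 * unexpanded source. __PageMovable() is the "movable but not anon" case.
 */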
# 683 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_test_ksm(const struct folio *folio) { return false; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int PageKsm(const struct page *page) { return 0; }


u64 stable_page_flags(struct page *page);
# 698 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_test_uptodate(struct folio *folio)
{
bool ret = arch_test_bit(PG_uptodate, folio_flags(folio, 0));
# 709 "./include/linux/page-flags.h"
if (ret)
do { do { } while (0); __asm__ __volatile__ ("fence " "r" "," "r" : : : "memory"); } while (0);

return ret;
}
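/*
 * The conditional "fence r,r" above is a read barrier: once PG_uptodate
 * is seen set, reads of the page contents cannot be hoisted before the
 * flag test. It pairs with the "fence w,w" write barriers in the
 * mark_uptodate helpers below, which commit the data before the flag
 * becomes visible.
 */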

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int PageUptodate(struct page *page)
{
return folio_test_uptodate((_Generic((page), const struct page *: (const struct folio *)_compound_head(page), struct page *: (struct folio *)_compound_head(page))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __folio_mark_uptodate(struct folio *folio)
{
do { do { } while (0); __asm__ __volatile__ ("fence " "w" "," "w" : : : "memory"); } while (0);
arch___set_bit(PG_uptodate, folio_flags(folio, 0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_mark_uptodate(struct folio *folio)
{





do { do { } while (0); __asm__ __volatile__ ("fence " "w" "," "w" : : : "memory"); } while (0);
set_bit(PG_uptodate, folio_flags(folio, 0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __SetPageUptodate(struct page *page)
{
__folio_mark_uptodate((struct folio *)page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageUptodate(struct page *page)
{
folio_mark_uptodate((struct folio *)page);
}
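/*
 * __SetPageUptodate()/SetPageUptodate() cast struct page * directly to
 * struct folio *, which appears to assume the caller passes a head (or
 * order-0) page, since struct folio overlays the head struct page.
 */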

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_uptodate(struct folio *folio) { clear_bit(PG_uptodate, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageUptodate(struct page *page) { clear_bit(PG_uptodate, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (747), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(((typeof(page))_compound_head(page)))), 0)) { dump_page(((typeof(page))_compound_head(page)), "VM_BUG_ON_PAGE(" "PagePoisoned(((typeof(page))_compound_head(page)))"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (747), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ((typeof(page))_compound_head(page)); }); })->flags); }

bool __folio_start_writeback(struct folio *folio, bool keep_write);
bool set_page_writeback(struct page *page);






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_page_writeback_keepwrite(struct page *page)
{
__folio_start_writeback((_Generic((page), const struct page *: (const struct folio *)_compound_head(page), struct page *: (struct folio *)_compound_head(page))), true);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool test_set_page_writeback(struct page *page)
{
return set_page_writeback(page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_head(struct folio *folio)
{
return arch_test_bit(PG_head, folio_flags(folio, 0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageHead(struct page *page)
{
({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (774), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; });
return arch_test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}
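/*
 * PageHead() also rejects "fake" heads via page_is_fake_head(),
 * presumably the guard for the HugeTLB vmemmap optimisation, where the
 * shared vmemmap can make a tail page's flags word look like a head's.
 */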

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __folio_set_head(struct folio *folio) { arch___set_bit(PG_head, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __SetPageHead(struct page *page) { arch___set_bit(PG_head, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (778), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __folio_clear_head(struct folio *folio) { arch___clear_bit(PG_head, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __ClearPageHead(struct page *page) { arch___clear_bit(PG_head, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (779), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_head(struct folio *folio) { clear_bit(PG_head, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageHead(struct page *page) { clear_bit(PG_head, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (780), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); }







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_test_large(struct folio *folio)
{
return folio_test_head(folio);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void set_compound_head(struct page *page, struct page *head)
{
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_152(void) ; if (!((sizeof(page->compound_head) == sizeof(char) || sizeof(page->compound_head) == sizeof(short) || sizeof(page->compound_head) == sizeof(int) || sizeof(page->compound_head) == sizeof(long)) || sizeof(page->compound_head) == sizeof(long long))) __compiletime_assert_152(); } while (0); do { *(volatile typeof(page->compound_head) *)&(page->compound_head) = ((unsigned long)head + 1); } while (0); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void clear_compound_head(struct page *page)
{
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_153(void) ; if (!((sizeof(page->compound_head) == sizeof(char) || sizeof(page->compound_head) == sizeof(short) || sizeof(page->compound_head) == sizeof(int) || sizeof(page->compound_head) == sizeof(long)) || sizeof(page->compound_head) == sizeof(long long))) __compiletime_assert_153(); } while (0); do { *(volatile typeof(page->compound_head) *)&(page->compound_head) = (0); } while (0); } while (0);
}
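/*
 * set_compound_head() stores (unsigned long)head + 1, i.e. the head
 * pointer with bit 0 set, marking the page as a tail; _compound_head()
 * elsewhere masks that bit back off. The __compiletime_assert_15x()
 * calls are the size check of the WRITE_ONCE()-style volatile store.
 */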


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ClearPageCompound(struct page *page)
{
do { if (__builtin_expect(!!(!PageHead(page)), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (806), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
ClearPageHead(page);
}
# 821 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_test_hugetlb(const struct folio *folio) { return false; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int PageHuge(const struct page *page) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_test_headhuge(const struct folio *folio) { return false; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int PageHeadHuge(const struct page *page) { return 0; }
# 834 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int PageTransHuge(struct page *page)
{
do { if (__builtin_expect(!!(PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (836), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0);
return PageHead(page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_test_transhuge(struct folio *folio)
{
return folio_test_head(folio);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int PageTransCompound(struct page *page)
{
return PageCompound(page);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int PageTransTail(struct page *page)
{
return PageTail(page);
}
# 878 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_double_map(struct folio *folio) { return arch_test_bit(PG_double_map, folio_flags(folio, 1)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageDoubleMap(struct page *page) { return arch_test_bit(PG_double_map, &({ do { if (__builtin_expect(!!(!PageHead(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageHead(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (878), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(&page[1])), 0)) { dump_page(&page[1], "VM_BUG_ON_PAGE(" "PagePoisoned(&page[1])"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (878), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); &page[1]; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_double_map(struct folio *folio) { set_bit(PG_double_map, folio_flags(folio, 1)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageDoubleMap(struct page *page) { set_bit(PG_double_map, &({ do { if (__builtin_expect(!!(!PageHead(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageHead(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (878), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(&page[1])), 0)) { dump_page(&page[1], "VM_BUG_ON_PAGE(" "PagePoisoned(&page[1])"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (878), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); &page[1]; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_double_map(struct folio *folio) { clear_bit(PG_double_map, folio_flags(folio, 1)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageDoubleMap(struct page *page) { clear_bit(PG_double_map, &({ do { if (__builtin_expect(!!(!PageHead(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageHead(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (878), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(&page[1])), 0)) { dump_page(&page[1], "VM_BUG_ON_PAGE(" "PagePoisoned(&page[1])"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (878), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); &page[1]; }); })->flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_set_double_map(struct folio *folio) { return test_and_set_bit(PG_double_map, folio_flags(folio, 1)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestSetPageDoubleMap(struct page *page) { return test_and_set_bit(PG_double_map, &({ do { if (__builtin_expect(!!(!PageHead(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageHead(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (879), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(&page[1])), 0)) { dump_page(&page[1], "VM_BUG_ON_PAGE(" "PagePoisoned(&page[1])"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (879), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); &page[1]; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_clear_double_map(struct folio *folio) { return test_and_clear_bit(PG_double_map, folio_flags(folio, 1)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int TestClearPageDoubleMap(struct page *page) { return test_and_clear_bit(PG_double_map, &({ do { if (__builtin_expect(!!(!PageHead(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageHead(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (879), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(&page[1])), 0)) { dump_page(&page[1], "VM_BUG_ON_PAGE(" "PagePoisoned(&page[1])"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (879), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); &page[1]; }); })->flags); }
# 899 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_test_has_hwpoisoned(const struct folio *folio) { return false; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int PageHasHWPoisoned(const struct page *page) { return 0; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_set_has_hwpoisoned(struct folio *folio) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void SetPageHasHWPoisoned(struct page *page) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_clear_has_hwpoisoned(struct folio *folio) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ClearPageHasHWPoisoned(struct page *page) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_test_set_has_hwpoisoned(struct folio *folio) { return 0; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int TestSetPageHasHWPoisoned(struct page *page) { return 0; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_test_clear_has_hwpoisoned(struct folio *folio) { return 0; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int TestClearPageHasHWPoisoned(struct page *page) { return 0; }







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_page_hwpoison(struct page *page)
{
if (PageHWPoison(page))
return true;
return PageHuge(page) && PageHWPoison(((typeof(page))_compound_head(page)));
}
# 935 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int page_has_type(struct page *page)
{
return (int)page->page_type < -128;
}
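/*
 * page->page_type encoding: the field starts at 0xffffffff, and a type
 * is assigned by clearing bits beneath the 0xf0000000 base, so any
 * typed page reads as an int below -128. The Buddy/Offline/Table/Guard
 * helpers below test and flip the individual type bits 0x80, 0x100,
 * 0x200 and 0x400 against that base.
 */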
# 960 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageBuddy(struct page *page) { return ((page->page_type & (0xf0000000 | 0x00000080)) == 0xf0000000); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __SetPageBuddy(struct page *page) { do { if (__builtin_expect(!!(!((page->page_type & (0xf0000000 | 0)) == 0xf0000000)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!((page->page_type & (0xf0000000 | 0)) == 0xf0000000)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (960), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page->page_type &= ~0x00000080; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __ClearPageBuddy(struct page *page) { do { if (__builtin_expect(!!(!PageBuddy(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageBuddy(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (960), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page->page_type |= 0x00000080; }
# 984 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageOffline(struct page *page) { return ((page->page_type & (0xf0000000 | 0x00000100)) == 0xf0000000); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __SetPageOffline(struct page *page) { do { if (__builtin_expect(!!(!((page->page_type & (0xf0000000 | 0)) == 0xf0000000)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!((page->page_type & (0xf0000000 | 0)) == 0xf0000000)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (984), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page->page_type &= ~0x00000100; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __ClearPageOffline(struct page *page) { do { if (__builtin_expect(!!(!PageOffline(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageOffline(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (984), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page->page_type |= 0x00000100; }

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageTable(struct page *page) { return ((page->page_type & (0xf0000000 | 0x00000200)) == 0xf0000000); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __SetPageTable(struct page *page) { do { if (__builtin_expect(!!(!((page->page_type & (0xf0000000 | 0)) == 0xf0000000)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!((page->page_type & (0xf0000000 | 0)) == 0xf0000000)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (994), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page->page_type &= ~0x00000200; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __ClearPageTable(struct page *page) { do { if (__builtin_expect(!!(!PageTable(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageTable(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (994), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page->page_type |= 0x00000200; }




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageGuard(struct page *page) { return ((page->page_type & (0xf0000000 | 0x00000400)) == 0xf0000000); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __SetPageGuard(struct page *page) { do { if (__builtin_expect(!!(!((page->page_type & (0xf0000000 | 0)) == 0xf0000000)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!((page->page_type & (0xf0000000 | 0)) == 0xf0000000)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (999), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page->page_type &= ~0x00000400; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __ClearPageGuard(struct page *page) { do { if (__builtin_expect(!!(!PageGuard(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageGuard(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (999), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page->page_type |= 0x00000400; }

extern bool is_free_buddy_page(struct page *page);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool folio_test_isolated(struct folio *folio) { return arch_test_bit(PG_isolated, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int PageIsolated(struct page *page) { return arch_test_bit(PG_isolated, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (1003), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_set_isolated(struct folio *folio) { set_bit(PG_isolated, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void SetPageIsolated(struct page *page) { set_bit(PG_isolated, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (1003), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void folio_clear_isolated(struct folio *folio) { clear_bit(PG_isolated, folio_flags(folio, 0)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ClearPageIsolated(struct page *page) { clear_bit(PG_isolated, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (1003), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); };
# 1042 "./include/linux/page-flags.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int page_has_private(struct page *page)
{
return !!(page->flags & (1UL << PG_private | 1UL << PG_private_2));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_has_private(struct folio *folio)
{
return page_has_private(&folio->page);
}
# 23 "./include/linux/mmzone.h" 2
# 1 "./include/linux/local_lock.h" 1




# 1 "./include/linux/local_lock_internal.h" 1
# 11 "./include/linux/local_lock_internal.h"
typedef struct {

struct lockdep_map dep_map;
struct task_struct *owner;

} local_lock_t;
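/*
 * Lockdep-enabled local_lock_t: dep_map and owner exist purely for
 * debugging. In this (non-PREEMPT_RT) configuration the real exclusion
 * comes from the callers disabling preemption or interrupts; the
 * acquire/release helpers below only validate and record ownership.
 */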
# 27 "./include/linux/local_lock_internal.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void local_lock_acquire(local_lock_t *l)
{
lock_acquire(&l->dep_map, 0, 0, 0, 1, ((void *)0), ({ __label__ __here; __here: (unsigned long)&&__here; }));
({ int __ret = 0; if (!oops_in_progress && __builtin_expect(!!(l->owner), 0)) { do { } while(0); if (debug_locks_off() && !debug_locks_silent) ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("DEBUG_LOCKS_WARN_ON(%s)", "l->owner"); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/local_lock_internal.h"), "i" (30), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { } while(0); __ret = 1; } __ret; });
l->owner = get_current();
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void local_lock_release(local_lock_t *l)
{
({ int __ret = 0; if (!oops_in_progress && __builtin_expect(!!(l->owner != get_current()), 0)) { do { } while(0); if (debug_locks_off() && !debug_locks_silent) ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("DEBUG_LOCKS_WARN_ON(%s)", "l->owner != current"); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/local_lock_internal.h"), "i" (36), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { } while(0); __ret = 1; } __ret; });
l->owner = ((void *)0);
lock_release(&l->dep_map, ({ __label__ __here; __here: (unsigned long)&&__here; }));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void local_lock_debug_init(local_lock_t *l)
{
l->owner = ((void *)0);
}
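/*
 * With CONFIG_DEBUG_LOCK_ALLOC, local_lock_t carries a lockdep dep_map and
 * an owner pointer, and the acquire/release helpers assert ownership. The
 * `({ __label__ __here; __here: (unsigned long)&&__here; })` statement
 * expression is the expansion of _THIS_IP_, i.e. the current instruction
 * pointer handed to lockdep.
 */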
# 6 "./include/linux/local_lock.h" 2
# 24 "./include/linux/mmzone.h" 2
# 42 "./include/linux/mmzone.h"
enum migratetype {
MIGRATE_UNMOVABLE,
MIGRATE_MOVABLE,
MIGRATE_RECLAIMABLE,
MIGRATE_PCPTYPES,
MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
# 62 "./include/linux/mmzone.h"
MIGRATE_CMA,


MIGRATE_ISOLATE,

MIGRATE_TYPES
};
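/*
 * The page allocator's mobility classes. MIGRATE_HIGHATOMIC aliases
 * MIGRATE_PCPTYPES (it has no per-cpu free list of its own), and the
 * presence of MIGRATE_CMA and MIGRATE_ISOLATE shows that CONFIG_CMA and
 * CONFIG_MEMORY_ISOLATION are enabled in this configuration.
 */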


extern const char * const migratetype_names[MIGRATE_TYPES];
# 81 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_migrate_movable(int mt)
{
return __builtin_expect(!!((mt) == MIGRATE_CMA), 0) || mt == MIGRATE_MOVABLE;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool migratetype_is_mergeable(int mt)
{
return mt < MIGRATE_PCPTYPES;
}





extern int page_group_by_mobility_disabled;






struct free_area {
struct list_head free_list[MIGRATE_TYPES];
unsigned long nr_free;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *get_page_from_free_area(struct free_area *area,
int migratetype)
{
return ({ struct list_head *head__ = (&area->free_list[migratetype]); struct list_head *pos__ = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_154(void) ; if (!((sizeof(head__->next) == sizeof(char) || sizeof(head__->next) == sizeof(short) || sizeof(head__->next) == sizeof(int) || sizeof(head__->next) == sizeof(long)) || sizeof(head__->next) == sizeof(long long))) __compiletime_assert_154(); } while (0); (*(const volatile typeof( _Generic((head__->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (head__->next))) *)&(head__->next)); }); pos__ != head__ ? ({ void *__mptr = (void *)(pos__); _Static_assert(__builtin_types_compatible_p(typeof(*(pos__)), typeof(((struct page *)0)->lru)) || __builtin_types_compatible_p(typeof(*(pos__)), typeof(void)), "pointer type mismatch in container_of()"); ((struct page *)(__mptr - __builtin_offsetof(struct page, lru))); }) : ((void *)0); });

}
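/*
 * The statement expression above is list_first_entry_or_null() fully
 * expanded: a READ_ONCE() of head->next (the _Generic selection plus the
 * __compiletime_assert_154 size check, whose numeric suffix comes from
 * __COUNTER__), followed by container_of() mapping the lru list_head back
 * to its struct page.
 */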

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool free_area_empty(struct free_area *area, int migratetype)
{
return list_empty(&area->free_list[migratetype]);
}

struct pglist_data;







struct zone_padding {
char x[0];
} __attribute__((__aligned__(1 << (6))));
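/*
 * struct zone_padding backs the ZONE_PADDING() markers used below; the
 * __aligned__(1 << 6) forces 64-byte (L1 cache line) alignment so groups of
 * hot zone/pgdat fields land on separate cache lines.
 */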
# 155 "./include/linux/mmzone.h"
enum zone_stat_item {

NR_FREE_PAGES,
NR_ZONE_LRU_BASE,
NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
NR_ZONE_ACTIVE_ANON,
NR_ZONE_INACTIVE_FILE,
NR_ZONE_ACTIVE_FILE,
NR_ZONE_UNEVICTABLE,
NR_ZONE_WRITE_PENDING,
NR_MLOCK,

NR_BOUNCE,



NR_FREE_CMA_PAGES,
NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
NR_LRU_BASE,
NR_INACTIVE_ANON = NR_LRU_BASE,
NR_ACTIVE_ANON,
NR_INACTIVE_FILE,
NR_ACTIVE_FILE,
NR_UNEVICTABLE,
NR_SLAB_RECLAIMABLE_B,
NR_SLAB_UNRECLAIMABLE_B,
NR_ISOLATED_ANON,
NR_ISOLATED_FILE,
WORKINGSET_NODES,
WORKINGSET_REFAULT_BASE,
WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
WORKINGSET_REFAULT_FILE,
WORKINGSET_ACTIVATE_BASE,
WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
WORKINGSET_ACTIVATE_FILE,
WORKINGSET_RESTORE_BASE,
WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
WORKINGSET_RESTORE_FILE,
WORKINGSET_NODERECLAIM,
NR_ANON_MAPPED,
NR_FILE_MAPPED,

NR_FILE_PAGES,
NR_FILE_DIRTY,
NR_WRITEBACK,
NR_WRITEBACK_TEMP,
NR_SHMEM,
NR_SHMEM_THPS,
NR_SHMEM_PMDMAPPED,
NR_FILE_THPS,
NR_FILE_PMDMAPPED,
NR_ANON_THPS,
NR_VMSCAN_WRITE,
NR_VMSCAN_IMMEDIATE,
NR_DIRTIED,
NR_WRITTEN,
NR_THROTTLED_WRITTEN,
NR_KERNEL_MISC_RECLAIMABLE,
NR_FOLL_PIN_ACQUIRED,
NR_FOLL_PIN_RELEASED,
NR_KERNEL_STACK_KB,



NR_PAGETABLE,

NR_SWAPCACHE,




NR_VM_NODE_STAT_ITEMS
};






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool vmstat_item_print_in_thp(enum node_stat_item item)
{
if (!1)
return false;

return item == NR_ANON_THPS ||
item == NR_FILE_THPS ||
item == NR_SHMEM_THPS ||
item == NR_SHMEM_PMDMAPPED ||
item == NR_FILE_PMDMAPPED;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool vmstat_item_in_bytes(int idx)
{
# 264 "./include/linux/mmzone.h"
return (idx == NR_SLAB_RECLAIMABLE_B ||
idx == NR_SLAB_UNRECLAIMABLE_B);
}
# 281 "./include/linux/mmzone.h"
enum lru_list {
LRU_INACTIVE_ANON = 0,
LRU_ACTIVE_ANON = 0 + 1,
LRU_INACTIVE_FILE = 0 + 2,
LRU_ACTIVE_FILE = 0 + 2 + 1,
LRU_UNEVICTABLE,
NR_LRU_LISTS
};

enum vmscan_throttle_state {
VMSCAN_THROTTLE_WRITEBACK,
VMSCAN_THROTTLE_ISOLATED,
VMSCAN_THROTTLE_NOPROGRESS,
VMSCAN_THROTTLE_CONGESTED,
NR_VMSCAN_THROTTLE,
};





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_file_lru(enum lru_list lru)
{
return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_active_lru(enum lru_list lru)
{
return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}



enum lruvec_flags {
LRUVEC_CONGESTED,


};

struct lruvec {
struct list_head lists[NR_LRU_LISTS];

spinlock_t lru_lock;





unsigned long anon_cost;
unsigned long file_cost;

atomic_long_t nonresident_age;

unsigned long refaults[2];

unsigned long flags;



};
# 350 "./include/linux/mmzone.h"
typedef unsigned isolate_mode_t;

enum zone_watermarks {
WMARK_MIN,
WMARK_LOW,
WMARK_HIGH,
WMARK_PROMO,
NR_WMARK
};
# 384 "./include/linux/mmzone.h"
struct per_cpu_pages {
int count;
int high;
int batch;
short free_factor;





struct list_head lists[(MIGRATE_PCPTYPES * (3 + 1 + 1))];
};
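/*
 * Assumption: the lists[] dimension MIGRATE_PCPTYPES * (3 + 1 + 1) looks
 * like NR_PCP_LISTS with PAGE_ALLOC_COSTLY_ORDER = 3 plus one extra
 * THP-order slot per migratetype; the macro names are inferred from the
 * arithmetic and are not visible in the preprocessed output.
 */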

struct per_cpu_zonestat {

s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
s8 stat_threshold;
# 410 "./include/linux/mmzone.h"
};

struct per_cpu_nodestat {
s8 stat_threshold;
s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};



enum zone_type {
# 434 "./include/linux/mmzone.h"
ZONE_DMA32,






ZONE_NORMAL,
# 502 "./include/linux/mmzone.h"
ZONE_MOVABLE,



__MAX_NR_ZONES

};





struct zone {



unsigned long _watermark[NR_WMARK];
unsigned long watermark_boost;

unsigned long nr_reserved_highatomic;
# 532 "./include/linux/mmzone.h"
long lowmem_reserve[3];




struct pglist_data *zone_pgdat;
struct per_cpu_pages *per_cpu_pageset;
struct per_cpu_zonestat *per_cpu_zonestats;




int pageset_high;
int pageset_batch;
# 556 "./include/linux/mmzone.h"
unsigned long zone_start_pfn;
# 600 "./include/linux/mmzone.h"
atomic_long_t managed_pages;
unsigned long spanned_pages;
unsigned long present_pages;




unsigned long cma_pages;


const char *name;







unsigned long nr_isolate_pageblock;







int initialized;


struct zone_padding _pad1_;


struct free_area free_area[11];


unsigned long flags;


spinlock_t lock;


struct zone_padding _pad2_;






unsigned long percpu_drift_mark;



unsigned long compact_cached_free_pfn;

unsigned long compact_cached_migrate_pfn[2];
unsigned long compact_init_migrate_pfn;
unsigned long compact_init_free_pfn;
# 666 "./include/linux/mmzone.h"
unsigned int compact_considered;
unsigned int compact_defer_shift;
int compact_order_failed;




bool compact_blockskip_flush;


bool contiguous;

struct zone_padding _pad3_;

atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
atomic_long_t vm_numa_event[0];
} __attribute__((__aligned__(1 << (6))));
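/*
 * free_area[11] means MAX_ORDER is 11 in this configuration (orders 0..10,
 * i.e. up to 4 MiB buddy blocks with 4 KiB pages), and the zero-length
 * vm_numa_event[0] array is the !CONFIG_NUMA shape of struct zone.
 */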

enum pgdat_flags {
PGDAT_DIRTY,



PGDAT_WRITEBACK,


PGDAT_RECLAIM_LOCKED,
};

enum zone_flags {
ZONE_BOOSTED_WATERMARK,


ZONE_RECLAIM_ACTIVE,
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long zone_managed_pages(struct zone *zone)
{
return (unsigned long)atomic_long_read(&zone->managed_pages);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long zone_cma_pages(struct zone *zone)
{

return zone->cma_pages;



}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long zone_end_pfn(const struct zone *zone)
{
return zone->zone_start_pfn + zone->spanned_pages;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool zone_is_initialized(struct zone *zone)
{
return zone->initialized;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool zone_is_empty(struct zone *zone)
{
return zone->spanned_pages == 0;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool zone_intersects(struct zone *zone,
unsigned long start_pfn, unsigned long nr_pages)
{
if (zone_is_empty(zone))
return false;
if (start_pfn >= zone_end_pfn(zone) ||
start_pfn + nr_pages <= zone->zone_start_pfn)
return false;

return true;
}
# 762 "./include/linux/mmzone.h"
enum {
ZONELIST_FALLBACK,







MAX_ZONELISTS
};





struct zoneref {
struct zone *zone;
int zone_idx;
};
# 797 "./include/linux/mmzone.h"
struct zonelist {
struct zoneref _zonerefs[((1 << 0) * 3) + 1];
};






extern struct page *mem_map;


struct deferred_split {
spinlock_t split_queue_lock;
struct list_head split_queue;
unsigned long split_queue_len;
};
# 824 "./include/linux/mmzone.h"
typedef struct pglist_data {





struct zone node_zones[3];






struct zonelist node_zonelists[MAX_ZONELISTS];

int nr_zones;
# 861 "./include/linux/mmzone.h"
unsigned long node_start_pfn;
unsigned long node_present_pages;
unsigned long node_spanned_pages;

int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;


wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE];

atomic_t nr_writeback_throttled;
unsigned long nr_reclaim_start;

struct task_struct *kswapd;

int kswapd_order;
enum zone_type kswapd_highest_zoneidx;

int kswapd_failures;


int kcompactd_max_order;
enum zone_type kcompactd_highest_zoneidx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
bool proactive_compact_trigger;





unsigned long totalreserve_pages;
# 904 "./include/linux/mmzone.h"
struct zone_padding _pad1_;
# 915 "./include/linux/mmzone.h"
struct deferred_split deferred_split_queue;
# 925 "./include/linux/mmzone.h"
struct lruvec __lruvec;

unsigned long flags;

struct zone_padding _pad2_;


struct per_cpu_nodestat *per_cpu_nodestats;
atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool pgdat_is_empty(pg_data_t *pgdat)
{
return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}


# 1 "./include/linux/memory_hotplug.h" 1




# 1 "./include/linux/mmzone.h" 1
# 6 "./include/linux/memory_hotplug.h" 2

# 1 "./include/linux/notifier.h" 1
# 16 "./include/linux/notifier.h"
# 1 "./include/linux/srcu.h" 1
# 22 "./include/linux/srcu.h"
# 1 "./include/linux/rcu_segcblist.h" 1
# 21 "./include/linux/rcu_segcblist.h"
struct rcu_cblist {
struct callback_head *head;
struct callback_head **tail;
long len;
};
# 206 "./include/linux/rcu_segcblist.h"
struct rcu_segcblist {
struct callback_head *head;
struct callback_head **tails[4];
unsigned long gp_seq[4];



long len;

long seglen[4];
u8 flags;
};
# 23 "./include/linux/srcu.h" 2

struct srcu_struct;



int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
struct lock_class_key *key);
# 49 "./include/linux/srcu.h"
# 1 "./include/linux/srcutree.h" 1
# 14 "./include/linux/srcutree.h"
# 1 "./include/linux/rcu_node_tree.h" 1
# 15 "./include/linux/srcutree.h" 2


struct srcu_node;
struct srcu_struct;





struct srcu_data {

unsigned long srcu_lock_count[2];
unsigned long srcu_unlock_count[2];


spinlock_t lock __attribute__((__aligned__(1 << (6))));
struct rcu_segcblist srcu_cblist;
unsigned long srcu_gp_seq_needed;
unsigned long srcu_gp_seq_needed_exp;
bool srcu_cblist_invoking;
struct timer_list delay_work;
struct work_struct work;
struct callback_head srcu_barrier_head;
struct srcu_node *mynode;
unsigned long grpmask;

int cpu;
struct srcu_struct *ssp;
};




struct srcu_node {
spinlock_t lock;
unsigned long srcu_have_cbs[4];


unsigned long srcu_data_have_cbs[4];

unsigned long srcu_gp_seq_needed_exp;
struct srcu_node *srcu_parent;
int grplo;
int grphi;
};




struct srcu_struct {
struct srcu_node node[(1 + (((32) + ((16)) - 1) / ((16))))];
struct srcu_node *level[2 + 1];

struct mutex srcu_cb_mutex;
spinlock_t lock;
struct mutex srcu_gp_mutex;
unsigned int srcu_idx;
unsigned long srcu_gp_seq;
unsigned long srcu_gp_seq_needed;
unsigned long srcu_gp_seq_needed_exp;
unsigned long srcu_last_gp_end;
struct srcu_data *sda;
unsigned long srcu_barrier_seq;
struct mutex srcu_barrier_mutex;
struct completion srcu_barrier_completion;

atomic_t srcu_barrier_cpu_cnt;


struct delayed_work work;
struct lockdep_map dep_map;
};
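/*
 * Tree SRCU. The node[] size works out from NR_CPUS = 32 with a combining
 * tree fanout of 16: 1 + (32 + 16 - 1)/16 = 3 srcu_node entries, i.e. a
 * root plus two leaves.
 */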
# 135 "./include/linux/srcutree.h"
void synchronize_srcu_expedited(struct srcu_struct *ssp);
void srcu_barrier(struct srcu_struct *ssp);
void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf);
# 50 "./include/linux/srcu.h" 2







void call_srcu(struct srcu_struct *ssp, struct callback_head *head,
void (*func)(struct callback_head *head));
void cleanup_srcu_struct(struct srcu_struct *ssp);
int __srcu_read_lock(struct srcu_struct *ssp);
void __srcu_read_unlock(struct srcu_struct *ssp, int idx);
void synchronize_srcu(struct srcu_struct *ssp);
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);


void srcu_init(void);
# 91 "./include/linux/srcu.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int srcu_read_lock_held(const struct srcu_struct *ssp)
{
if (!debug_lockdep_rcu_enabled())
return 1;
return lock_is_held(&ssp->dep_map);
}
# 160 "./include/linux/srcu.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int srcu_read_lock(struct srcu_struct *ssp)
{
int retval;

retval = __srcu_read_lock(ssp);
rcu_lock_acquire(&(ssp)->dep_map);
return retval;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((patchable_function_entry(0, 0))) int
srcu_read_lock_notrace(struct srcu_struct *ssp)
{
int retval;

retval = __srcu_read_lock(ssp);
return retval;
}
# 186 "./include/linux/srcu.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void srcu_read_unlock(struct srcu_struct *ssp, int idx)

{
({ int __ret_warn_on = !!(idx & ~0x1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/srcu.h"), "i" (189), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
rcu_lock_release(&(ssp)->dep_map);
__srcu_read_unlock(ssp, idx);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((patchable_function_entry(0, 0))) void
srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx)
{
__srcu_read_unlock(ssp, idx);
}
# 210 "./include/linux/srcu.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void smp_mb__after_srcu_read_unlock(void)
{

}
# 17 "./include/linux/notifier.h" 2
# 49 "./include/linux/notifier.h"
struct notifier_block;

typedef int (*notifier_fn_t)(struct notifier_block *nb,
unsigned long action, void *data);

struct notifier_block {
notifier_fn_t notifier_call;
struct notifier_block *next;
int priority;
};

struct atomic_notifier_head {
spinlock_t lock;
struct notifier_block *head;
};

struct blocking_notifier_head {
struct rw_semaphore rwsem;
struct notifier_block *head;
};

struct raw_notifier_head {
struct notifier_block *head;
};

struct srcu_notifier_head {
struct mutex mutex;
struct srcu_struct srcu;
struct notifier_block *head;
};
# 93 "./include/linux/notifier.h"
extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
# 144 "./include/linux/notifier.h"
extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
struct notifier_block *nb);
extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
struct notifier_block *nb);
extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
struct notifier_block *nb);
extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
struct notifier_block *nb);

extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
struct notifier_block *nb);
extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
struct notifier_block *nb);
extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
struct notifier_block *nb);
extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
struct notifier_block *nb);

extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v);
extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
unsigned long val, void *v);
extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
unsigned long val, void *v);
extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
unsigned long val, void *v);

extern int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh,
unsigned long val_up, unsigned long val_down, void *v);
extern int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
unsigned long val_up, unsigned long val_down, void *v);
# 187 "./include/linux/notifier.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int notifier_from_errno(int err)
{
if (err)
return 0x8000 | (0x0001 - err);

return 0x0001;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int notifier_to_errno(int ret)
{
ret &= ~0x8000;
return ret > 0x0001 ? 0x0001 - ret : 0;
}
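/*
 * The magic numbers are NOTIFY_STOP_MASK (0x8000) and NOTIFY_OK (0x0001):
 * notifier_from_errno() folds a -errno value into a stop-the-chain return
 * code, and notifier_to_errno() unpacks it again.
 */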
# 231 "./include/linux/notifier.h"
extern struct blocking_notifier_head reboot_notifier_list;
# 8 "./include/linux/memory_hotplug.h" 2


struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct memory_group;
struct resource;
struct vmem_altmap;
# 61 "./include/linux/memory_hotplug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pg_data_t *generic_alloc_nodedata(int nid)
{
do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/memory_hotplug.h"), "i" (63), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0);
return ((void *)0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
# 226 "./include/linux/memory_hotplug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned zone_span_seqbegin(struct zone *zone)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int zone_span_seqretry(struct zone *zone, unsigned iv)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void zone_span_writelock(struct zone *zone) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void zone_span_writeunlock(struct zone *zone) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void zone_seqlock_init(struct zone *zone) {}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int try_online_node(int nid)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void get_online_mems(void) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_online_mems(void) {}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_hotplug_begin(void) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_hotplug_done(void) {}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool movable_node_is_enabled(void)
{
return false;
}
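/*
 * The empty stubs above (try_online_node(), get_online_mems(),
 * mem_hotplug_begin(), ...) are the !CONFIG_MEMORY_HOTPLUG variants, so
 * memory hotplug is disabled in this configuration.
 */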







struct range arch_get_mappable_range(void);
# 285 "./include/linux/memory_hotplug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pgdat_resize_init(struct pglist_data *pgdat) {}
# 300 "./include/linux/memory_hotplug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void try_offline_node(int nid) {}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
struct zone *zone, struct memory_group *group)
{
return -22;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int remove_memory(u64 start, u64 size)
{
return -16;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __remove_memory(u64 start, u64 size) {}


extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);
# 953 "./include/linux/mmzone.h" 2

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
enum zone_type highest_zoneidx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
int highest_zoneidx, unsigned int alloc_flags,
long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
unsigned long mark, int highest_zoneidx,
unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
unsigned long mark, int highest_zoneidx);




enum meminit_context {
MEMINIT_EARLY,
MEMINIT_HOTPLUG,
};

extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{



return ({ void *__mptr = (void *)(lruvec); _Static_assert(__builtin_types_compatible_p(typeof(*(lruvec)), typeof(((struct pglist_data *)0)->__lruvec)) || __builtin_types_compatible_p(typeof(*(lruvec)), typeof(void)), "pointer type mismatch in container_of()"); ((struct pglist_data *)(__mptr - __builtin_offsetof(struct pglist_data, __lruvec))); });

}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int local_memory_node(int node_id) { return node_id; };
# 1005 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool zone_is_zone_device(struct zone *zone)
{
return false;
}
# 1017 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool managed_zone(struct zone *zone)
{
return zone_managed_pages(zone);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool populated_zone(struct zone *zone)
{
return zone->present_pages;
}
# 1039 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int zone_to_nid(struct zone *zone)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void zone_set_nid(struct zone *zone, int nid) {}


extern int movable_zone;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int is_highmem_idx(enum zone_type idx)
{




return 0;

}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool has_managed_dma(void)
{
return false;
}
# 1075 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int is_highmem(struct zone *zone)
{



return 0;

}


struct ctl_table;

int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *,
loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *,
size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[3];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *,
size_t *, loff_t *);
int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
int numa_zonelist_order_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
extern int percpu_pagelist_high_fraction;
extern char numa_zonelist_order[];




extern struct pglist_data contig_page_data;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct pglist_data *NODE_DATA(int nid)
{
return &contig_page_data;
}
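/*
 * A single-node (!CONFIG_NUMA) build: NODE_DATA() always resolves to
 * &contig_page_data, and zone_to_nid() above is hardwired to 0.
 */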







extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);
# 1152 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct zone *zonelist_zone(struct zoneref *zoneref)
{
return zoneref->zone;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int zonelist_zone_idx(struct zoneref *zoneref)
{
return zoneref->zone_idx;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int zonelist_node_idx(struct zoneref *zoneref)
{
return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
enum zone_type highest_zoneidx,
nodemask_t *nodes);
# 1186 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) struct zoneref *next_zones_zonelist(struct zoneref *z,
enum zone_type highest_zoneidx,
nodemask_t *nodes)
{
if (__builtin_expect(!!(!nodes && zonelist_zone_idx(z) <= highest_zoneidx), 1))
return z;
return __next_zones_zonelist(z, highest_zoneidx, nodes);
}
# 1212 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
enum zone_type highest_zoneidx,
nodemask_t *nodes)
{
return next_zones_zonelist(zonelist->_zonerefs,
highest_zoneidx, nodes);
}
# 1257 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool movable_only_nodes(nodemask_t *nodes)
{
struct zonelist *zonelist;
struct zoneref *z;
int nid;

if (__nodes_empty(&(*nodes), (1 << 0)))
return false;






nid = __first_node(&(*nodes));
zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
return (!z->zone) ? true : false;
}
# 1307 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long pfn_to_section_nr(unsigned long pfn)
{
return pfn >> (27 - (12));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long section_nr_to_pfn(unsigned long sec)
{
return sec << (27 - (12));
}
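/*
 * SPARSEMEM constants: 27 is SECTION_SIZE_BITS and 12 is PAGE_SHIFT, so
 * each memory section spans 2^27 bytes = 128 MiB and PFN_SECTION_SHIFT
 * is 15.
 */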
# 1335 "./include/linux/mmzone.h"
struct mem_section_usage {

unsigned long subsection_map[((((1UL << (27 - 21))) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))];


unsigned long pageblock_flags[0];
};

void subsection_map_init(unsigned long pfn, unsigned long nr_pages);

struct page;
struct page_ext;
struct mem_section {
# 1360 "./include/linux/mmzone.h"
unsigned long section_mem_map;

struct mem_section_usage *usage;





struct page_ext *page_ext;
unsigned long pad;





};
# 1388 "./include/linux/mmzone.h"
extern struct mem_section **mem_section;




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long *section_to_usemap(struct mem_section *ms)
{
return ms->usage->pageblock_flags;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct mem_section *__nr_to_section(unsigned long nr)
{
unsigned long root = ((nr) / (((1UL) << (12)) / sizeof (struct mem_section)));

if (__builtin_expect(!!(root >= ((((1UL << (56 - 27))) + ((((1UL) << (12)) / sizeof (struct mem_section))) - 1) / ((((1UL) << (12)) / sizeof (struct mem_section))))), 0))
return ((void *)0);


if (!mem_section || !mem_section[root])
return ((void *)0);

return &mem_section[root][nr & ((((1UL) << (12)) / sizeof (struct mem_section)) - 1)];
}
extern size_t mem_section_usage_size(void);
# 1435 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *__section_mem_map_addr(struct mem_section *section)
{
unsigned long map = section->section_mem_map;
map &= (~((1UL<<5)-1));
return (struct page *)map;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int present_section(struct mem_section *section)
{
return (section && (section->section_mem_map & (1UL<<0)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int present_section_nr(unsigned long nr)
{
return present_section(__nr_to_section(nr));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int valid_section(struct mem_section *section)
{
return (section && (section->section_mem_map & (1UL<<1)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int early_section(struct mem_section *section)
{
return (section && (section->section_mem_map & (1UL<<3)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int valid_section_nr(unsigned long nr)
{
return valid_section(__nr_to_section(nr));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int online_section(struct mem_section *section)
{
return (section && (section->section_mem_map & (1UL<<2)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int online_device_section(struct mem_section *section)
{
unsigned long flags = (1UL<<2) | (1UL<<4);

return section && ((section->section_mem_map & flags) == flags);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int online_section_nr(unsigned long nr)
{
return online_section(__nr_to_section(nr));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct mem_section *__pfn_to_section(unsigned long pfn)
{
return __nr_to_section(pfn_to_section_nr(pfn));
}

extern unsigned long __highest_present_section_nr;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int subsection_map_index(unsigned long pfn)
{
return (pfn & ~((~((1UL << (27 - (12)))-1)))) / (1UL << (21 - (12)));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
int idx = subsection_map_index(pfn);

return arch_test_bit(idx, ms->usage->subsection_map);
}
# 1527 "./include/linux/mmzone.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pfn_valid(unsigned long pfn)
{
struct mem_section *ms;







if (((unsigned long)((((phys_addr_t)(pfn) << (12))) >> (12))) != pfn)
return 0;

if (pfn_to_section_nr(pfn) >= (1UL << (56 - 27)))
return 0;
ms = __pfn_to_section(pfn);
if (!valid_section(ms))
return 0;




return early_section(ms) || pfn_section_valid(ms, pfn);
}
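/*
 * The generic SPARSEMEM pfn_valid(): reject PFNs that do not survive a
 * round-trip through phys_addr_t, bounds-check the section number against
 * 1 << (56 - 27) (MAX_PHYSMEM_BITS = 56), then require a valid section
 * and, unless the section was present at boot (early_section()), a set bit
 * in its 2 MiB subsection map.
 */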


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pfn_in_present_section(unsigned long pfn)
{
if (pfn_to_section_nr(pfn) >= (1UL << (56 - 27)))
return 0;
return present_section(__pfn_to_section(pfn));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long next_present_section_nr(unsigned long section_nr)
{
while (++section_nr <= __highest_present_section_nr) {
if (present_section_nr(section_nr))
return section_nr;
}

return -1;
}
# 1585 "./include/linux/mmzone.h"
void sparse_init(void);
# 7 "./include/linux/gfp.h" 2


# 1 "./include/linux/topology.h" 1
# 30 "./include/linux/topology.h"
# 1 "./include/linux/arch_topology.h" 1
# 11 "./include/linux/arch_topology.h"
void topology_normalize_cpu_scale(void);
int topology_update_cpu_topology(void);





struct device_node;
bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);

extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned long) cpu_scale;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long topology_get_cpu_scale(int cpu)
{
return (*({ do { const void *__vpp_verify = (typeof((&(cpu_scale)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&(cpu_scale)))) *)((&(cpu_scale)))); (typeof((typeof(*((&(cpu_scale)))) *)((&(cpu_scale))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }));
}

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);

extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned long) arch_freq_scale;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long topology_get_freq_scale(int cpu)
{
return (*({ do { const void *__vpp_verify = (typeof((&(arch_freq_scale)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&(arch_freq_scale)))) *)((&(arch_freq_scale)))); (typeof((typeof(*((&(arch_freq_scale)))) *)((&(arch_freq_scale))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }));
}

void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
unsigned long max_freq);
bool topology_scale_freq_invariant(void);

enum scale_freq_source {
SCALE_FREQ_SOURCE_CPUFREQ = 0,
SCALE_FREQ_SOURCE_ARCH,
SCALE_FREQ_SOURCE_CPPC,
};

struct scale_freq_data {
enum scale_freq_source source;
void (*set_freq_scale)(void);
};

void topology_scale_freq_tick(void);
void topology_set_scale_freq_source(struct scale_freq_data *data, const struct cpumask *cpus);
void topology_clear_scale_freq_source(enum scale_freq_source source, const struct cpumask *cpus);

extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned long) thermal_pressure;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long topology_get_thermal_pressure(int cpu)
{
return (*({ do { const void *__vpp_verify = (typeof((&(thermal_pressure)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&(thermal_pressure)))) *)((&(thermal_pressure)))); (typeof((typeof(*((&(thermal_pressure)))) *)((&(thermal_pressure))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }));
}

void topology_update_thermal_pressure(const struct cpumask *cpus,
unsigned long capped_freq);

struct cpu_topology {
int thread_id;
int core_id;
int cluster_id;
int package_id;
int llc_id;
cpumask_t thread_sibling;
cpumask_t core_sibling;
cpumask_t cluster_sibling;
cpumask_t llc_sibling;
};


extern struct cpu_topology cpu_topology[32];
# 88 "./include/linux/arch_topology.h"
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
const struct cpumask *cpu_clustergroup_mask(int cpu);
void update_siblings_masks(unsigned int cpu);
void remove_cpu_topology(unsigned int cpuid);
void reset_cpu_topology(void);
int parse_acpi_topology(void);
# 31 "./include/linux/topology.h" 2





# 1 "./arch/riscv/include/generated/asm/topology.h" 1
# 1 "./include/asm-generic/topology.h" 1
# 2 "./arch/riscv/include/generated/asm/topology.h" 2
# 37 "./include/linux/topology.h" 2
# 46 "./include/linux/topology.h"
int arch_update_cpu_topology(void);
# 76 "./include/linux/topology.h"
extern int node_reclaim_distance;
# 118 "./include/linux/topology.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int numa_node_id(void)
{
return ((void)((((struct thread_info *)get_current())->cpu)),0);
}
# 168 "./include/linux/topology.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int numa_mem_id(void)
{
return numa_node_id();
}
# 250 "./include/linux/topology.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const struct cpumask *cpu_cpu_mask(int cpu)
{
return ((void)(((void)(cpu),0)), ((const struct cpumask *)&__cpu_online_mask));
}
# 10 "./include/linux/gfp.h" 2
# 25 "./include/linux/gfp.h"
struct vm_area_struct;
# 360 "./include/linux/gfp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int gfp_migratetype(const gfp_t gfp_flags)
{
(void)({ int __ret_warn_on = !!((gfp_flags & ((( gfp_t)0x10u)|(( gfp_t)0x08u))) == ((( gfp_t)0x10u)|(( gfp_t)0x08u))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/gfp.h"), "i" (362), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
do { __attribute__((__noreturn__)) extern void __compiletime_assert_155(void) ; if (!(!((1UL << 3) != 0x08u))) __compiletime_assert_155(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_156(void) ; if (!(!((0x08u >> 3) != MIGRATE_MOVABLE))) __compiletime_assert_156(); } while (0);

if (__builtin_expect(!!(page_group_by_mobility_disabled), 0))
return MIGRATE_UNMOVABLE;


return (gfp_flags & ((( gfp_t)0x10u)|(( gfp_t)0x08u))) >> 3;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
return !!(gfp_flags & (( gfp_t)0x400u));
}
# 397 "./include/linux/gfp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool gfpflags_normal_context(const gfp_t gfp_flags)
{
return (gfp_flags & ((( gfp_t)0x400u) | (( gfp_t)0x20000u))) ==
(( gfp_t)0x400u);
}
# 493 "./include/linux/gfp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) enum zone_type gfp_zone(gfp_t flags)
{
enum zone_type z;
int bit = ( int) (flags & ((( gfp_t)0x01u)|(( gfp_t)0x02u)|(( gfp_t)0x04u)|(( gfp_t)0x08u)));

z = (( (ZONE_NORMAL << 0 * 2) | (ZONE_NORMAL << 0x01u * 2) | (ZONE_NORMAL << 0x02u * 2) | (ZONE_DMA32 << 0x04u * 2) | (ZONE_NORMAL << 0x08u * 2) | (ZONE_NORMAL << (0x08u | 0x01u) * 2) | (ZONE_MOVABLE << (0x08u | 0x02u) * 2) | (ZONE_DMA32 << (0x08u | 0x04u) * 2)) >> (bit * 2)) &
((1 << 2) - 1);
do { if (__builtin_expect(!!((( 1 << (0x01u | 0x02u) | 1 << (0x01u | 0x04u) | 1 << (0x04u | 0x02u) | 1 << (0x01u | 0x04u | 0x02u) | 1 << (0x08u | 0x02u | 0x01u) | 1 << (0x08u | 0x04u | 0x01u) | 1 << (0x08u | 0x04u | 0x02u) | 1 << (0x08u | 0x04u | 0x01u | 0x02u) ) >> bit) & 1), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/gfp.h"), "i" (500), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
return z;
}
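/*
 * gfp_zone() indexes the packed GFP_ZONE_TABLE (two bits per entry) by the
 * zone-selector GFP bits: 0x01 __GFP_DMA, 0x02 __GFP_HIGHMEM, 0x04
 * __GFP_DMA32, 0x08 __GFP_MOVABLE. The second bitmask in the BUG check
 * traps invalid flag combinations (GFP_ZONE_BAD).
 */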
# 511 "./include/linux/gfp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int gfp_zonelist(gfp_t flags)
{




return ZONELIST_FALLBACK;
}
# 529 "./include/linux/gfp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct zonelist *node_zonelist(int nid, gfp_t flags)
{
return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_free_page(struct page *page, int order) { }


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_alloc_page(struct page *page, int order) { }


struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
nodemask_t *nodemask);
struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
nodemask_t *nodemask);

unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
nodemask_t *nodemask, int nr_pages,
struct list_head *page_list,
struct page **page_array);

unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
unsigned long nr_pages,
struct page **page_array);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long
alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list)
{
return __alloc_pages_bulk(gfp, numa_mem_id(), ((void *)0), nr_pages, list, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long
alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_array)
{
return __alloc_pages_bulk(gfp, numa_mem_id(), ((void *)0), nr_pages, ((void *)0), page_array);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long
alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array)
{
if (nid == (-1))
nid = numa_mem_id();

return __alloc_pages_bulk(gfp, nid, ((void *)0), nr_pages, ((void *)0), page_array);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
do { if (__builtin_expect(!!(nid < 0 || nid >= (1 << 0)), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/gfp.h"), "i" (584), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
(void)({ int __ret_warn_on = !!((gfp_mask & (( gfp_t)0x200000u)) && !node_state((nid), N_ONLINE)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/gfp.h"), "i" (585), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });

return __alloc_pages(gfp_mask, order, nid, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
{
do { if (__builtin_expect(!!(nid < 0 || nid >= (1 << 0)), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/gfp.h"), "i" (593), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
(void)({ int __ret_warn_on = !!((gfp & (( gfp_t)0x200000u)) && !node_state((nid), N_ONLINE)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/gfp.h"), "i" (594), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });

return __folio_alloc(gfp, order, nid, ((void *)0));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
unsigned int order)
{
if (nid == (-1))
nid = numa_mem_id();

return __alloc_pages_node(nid, gfp_mask, order);
}
# 624 "./include/linux/gfp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
{
return alloc_pages_node(numa_node_id(), gfp_mask, order);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct folio *folio_alloc(gfp_t gfp, unsigned int order)
{
return __folio_alloc_node(gfp, order, numa_node_id());
}
# 643 "./include/linux/gfp.h"
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __attribute__((__alloc_size__(1))) __attribute__((__malloc__));
void free_pages_exact(void *virt, size_t size);
__attribute__((__section__(".meminit.text"))) __attribute__((__cold__)) __attribute__((patchable_function_entry(0, 0))) void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __attribute__((__alloc_size__(2))) __attribute__((__malloc__));







extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);

struct page_frag_cache;
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
extern void *page_frag_alloc_align(struct page_frag_cache *nc,
unsigned int fragsz, gfp_t gfp_mask,
unsigned int align_mask);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *page_frag_alloc(struct page_frag_cache *nc,
unsigned int fragsz, gfp_t gfp_mask)
{
return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
}

extern void page_frag_free(void *addr);




void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);

void page_alloc_init_late(void);
# 690 "./include/linux/gfp.h"
extern gfp_t gfp_allowed_mask;


bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);

extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool pm_suspended_storage(void)
{
return false;
}




extern int alloc_contig_range(unsigned long start, unsigned long end,
unsigned migratetype, gfp_t gfp_mask);
extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
int nid, nodemask_t *nodemask);

void free_contig_range(unsigned long pfn, unsigned long nr_pages);



extern void init_cma_reserved_pageblock(struct page *page);
# 16 "./include/linux/xarray.h" 2
# 53 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xa_mk_value(unsigned long v)
{
({ int __ret_warn_on = !!((long)v < 0); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/xarray.h"), "i" (55), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
return (void *)((v << 1) | 1);
}
# 66 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long xa_to_value(const void *entry)
{
return (unsigned long)entry >> 1;
}
# 78 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xa_is_value(const void *entry)
{
return (unsigned long)entry & 1;
}
# 96 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xa_tag_pointer(void *p, unsigned long tag)
{
return (void *)((unsigned long)p | tag);
}
# 111 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xa_untag_pointer(void *entry)
{
return (void *)((unsigned long)entry & ~3UL);
}
# 126 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int xa_pointer_tag(void *entry)
{
return (unsigned long)entry & 3UL;
}
# 144 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xa_mk_internal(unsigned long v)
{
return (void *)((v << 2) | 2);
}
# 156 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long xa_to_internal(const void *entry)
{
return (unsigned long)entry >> 2;
}
# 168 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xa_is_internal(const void *entry)
{
return ((unsigned long)entry & 3) == 2;
}
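/*
 * The xarray distinguishes entry kinds by the two low bits of the pointer:
 * bit 0 set means a "value" entry (an integer shifted left by one), low
 * bits 10 mean an "internal" entry ((v << 2) | 2), and anything else is a
 * plain pointer. The statement expression inside xa_mk_value() above is
 * the expanded WARN_ON(); on riscv it plants an ebreak plus a __bug_table
 * record when a negative value is passed.
 */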
# 184 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xa_is_zero(const void *entry)
{
return __builtin_expect(!!(entry == xa_mk_internal(257)), 0);
}
# 200 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xa_is_err(const void *entry)
{
return __builtin_expect(!!(xa_is_internal(entry) && entry >= xa_mk_internal(-4095)), 0);

}
# 218 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xa_err(void *entry)
{

if (xa_is_err(entry))
return (long)entry >> 2;
return 0;
}
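/*
 * Errors are stored as internal entries: xa_mk_internal(-4095) corresponds
 * to -MAX_ERRNO, so any internal entry at or above it encodes a negative
 * errno. xa_err() recovers the errno with an arithmetic right shift,
 * undoing the (err << 2) | 2 encoding used by xas_set_err() further down.
 */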
# 238 "./include/linux/xarray.h"
struct xa_limit {
u32 max;
u32 min;
};







typedef unsigned xa_mark_t;







enum xa_lock_type {
XA_LOCK_IRQ = 1,
XA_LOCK_BH = 2,
};
# 295 "./include/linux/xarray.h"
struct xarray {
spinlock_t xa_lock;

gfp_t xa_flags;
void * xa_head;
};
# 350 "./include/linux/xarray.h"
void *xa_load(struct xarray *, unsigned long index);
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *xa_erase(struct xarray *, unsigned long index);
void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
void *entry, gfp_t);
bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
void *xa_find(struct xarray *xa, unsigned long *index,
unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
void *xa_find_after(struct xarray *xa, unsigned long *index,
unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
unsigned long max, unsigned int n, xa_mark_t);
void xa_destroy(struct xarray *);
# 377 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xa_init_flags(struct xarray *xa, gfp_t flags)
{
do { static struct lock_class_key __key; __raw_spin_lock_init(spinlock_check(&xa->xa_lock), "&xa->xa_lock", &__key, LD_WAIT_CONFIG); } while (0);
xa->xa_flags = flags;
xa->xa_head = ((void *)0);
}
# 392 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xa_init(struct xarray *xa)
{
xa_init_flags(xa, 0);
}
# 404 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xa_empty(const struct xarray *xa)
{
return xa->xa_head == ((void *)0);
}
# 417 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xa_marked(const struct xarray *xa, xa_mark_t mark)
{
return xa->xa_flags & (( gfp_t)((1U << (27 + 1)) << ( unsigned)(mark)));
}
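/*
 * Mark bits share the gfp_t flags word: (1U << (27 + 1)) << mark places
 * the marks in the bits just above the gfp flag space (27 + 1 is
 * presumably the expanded __GFP_BITS_SHIFT for this configuration).
 */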
# 557 "./include/linux/xarray.h"
void *__xa_erase(struct xarray *, unsigned long index);
void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
void *entry, gfp_t);
int __attribute__((__warn_unused_result__)) __xa_insert(struct xarray *, unsigned long index,
void *entry, gfp_t);
int __attribute__((__warn_unused_result__)) __xa_alloc(struct xarray *, u32 *id, void *entry,
struct xa_limit, gfp_t);
int __attribute__((__warn_unused_result__)) __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry,
struct xa_limit, u32 *next, gfp_t);
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
# 584 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xa_store_bh(struct xarray *xa, unsigned long index,
void *entry, gfp_t gfp)
{
void *curr;

spin_lock_bh(&(xa)->xa_lock);
curr = __xa_store(xa, index, entry, gfp);
spin_unlock_bh(&(xa)->xa_lock);

return curr;
}
# 610 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xa_store_irq(struct xarray *xa, unsigned long index,
void *entry, gfp_t gfp)
{
void *curr;

spin_lock_irq(&(xa)->xa_lock);
curr = __xa_store(xa, index, entry, gfp);
spin_unlock_irq(&(xa)->xa_lock);

return curr;
}
# 635 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xa_erase_bh(struct xarray *xa, unsigned long index)
{
void *entry;

spin_lock_bh(&(xa)->xa_lock);
entry = __xa_erase(xa, index);
spin_unlock_bh(&(xa)->xa_lock);

return entry;
}
# 659 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xa_erase_irq(struct xarray *xa, unsigned long index)
{
void *entry;

spin_lock_irq(&(xa)->xa_lock);
entry = __xa_erase(xa, index);
spin_unlock_irq(&(xa)->xa_lock);

return entry;
}
# 685 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xa_cmpxchg(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp)
{
void *curr;

spin_lock(&(xa)->xa_lock);
curr = __xa_cmpxchg(xa, index, old, entry, gfp);
spin_unlock(&(xa)->xa_lock);

return curr;
}
# 712 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp)
{
void *curr;

spin_lock_bh(&(xa)->xa_lock);
curr = __xa_cmpxchg(xa, index, old, entry, gfp);
spin_unlock_bh(&(xa)->xa_lock);

return curr;
}
# 739 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp)
{
void *curr;

spin_lock_irq(&(xa)->xa_lock);
curr = __xa_cmpxchg(xa, index, old, entry, gfp);
spin_unlock_irq(&(xa)->xa_lock);

return curr;
}
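/*
 * The wrappers above and below differ only in how xa_lock is taken:
 * spin_lock() for process context, spin_lock_bh() when the array is also
 * touched from softirq context, spin_lock_irq() when it is touched from
 * hardirq context; each brackets the corresponding __xa_* primitive.
 * A minimal usage sketch of the plain API declared earlier (hypothetical
 * caller, assuming sleeping allocations are permitted):
 *
 *	struct xarray xa;
 *	xa_init(&xa);
 *	xa_store(&xa, 42, obj, GFP_KERNEL);
 *	void *p = xa_load(&xa, 42);
 *	xa_erase(&xa, 42);
 */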
# 768 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) xa_insert(struct xarray *xa,
unsigned long index, void *entry, gfp_t gfp)
{
int err;

spin_lock(&(xa)->xa_lock);
err = __xa_insert(xa, index, entry, gfp);
spin_unlock(&(xa)->xa_lock);

return err;
}
# 797 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) xa_insert_bh(struct xarray *xa,
unsigned long index, void *entry, gfp_t gfp)
{
int err;

spin_lock_bh(&(xa)->xa_lock);
err = __xa_insert(xa, index, entry, gfp);
spin_unlock_bh(&(xa)->xa_lock);

return err;
}
# 826 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) xa_insert_irq(struct xarray *xa,
unsigned long index, void *entry, gfp_t gfp)
{
int err;

spin_lock_irq(&(xa)->xa_lock);
err = __xa_insert(xa, index, entry, gfp);
spin_unlock_irq(&(xa)->xa_lock);

return err;
}
# 855 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__)) int xa_alloc(struct xarray *xa, u32 *id,
void *entry, struct xa_limit limit, gfp_t gfp)
{
int err;

spin_lock(&(xa)->xa_lock);
err = __xa_alloc(xa, id, entry, limit, gfp);
spin_unlock(&(xa)->xa_lock);

return err;
}
# 884 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) xa_alloc_bh(struct xarray *xa, u32 *id,
void *entry, struct xa_limit limit, gfp_t gfp)
{
int err;

spin_lock_bh(&(xa)->xa_lock);
err = __xa_alloc(xa, id, entry, limit, gfp);
spin_unlock_bh(&(xa)->xa_lock);

return err;
}
# 913 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) xa_alloc_irq(struct xarray *xa, u32 *id,
void *entry, struct xa_limit limit, gfp_t gfp)
{
int err;

spin_lock_irq(&(xa)->xa_lock);
err = __xa_alloc(xa, id, entry, limit, gfp);
spin_unlock_irq(&(xa)->xa_lock);

return err;
}
# 946 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
struct xa_limit limit, u32 *next, gfp_t gfp)
{
int err;

spin_lock(&(xa)->xa_lock);
err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
spin_unlock(&(xa)->xa_lock);

return err;
}
# 979 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
struct xa_limit limit, u32 *next, gfp_t gfp)
{
int err;

spin_lock_bh(&(xa)->xa_lock);
err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
spin_unlock_bh(&(xa)->xa_lock);

return err;
}
# 1012 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
struct xa_limit limit, u32 *next, gfp_t gfp)
{
int err;

spin_lock_irq(&(xa)->xa_lock);
err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
spin_unlock_irq(&(xa)->xa_lock);

return err;
}
# 1042 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__))
int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
{
return xa_err(xa_cmpxchg(xa, index, ((void *)0), xa_mk_internal(257), gfp));
}
# 1060 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__))
int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
{
return xa_err(xa_cmpxchg_bh(xa, index, ((void *)0), xa_mk_internal(257), gfp));
}
# 1078 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__))
int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
{
return xa_err(xa_cmpxchg_irq(xa, index, ((void *)0), xa_mk_internal(257), gfp));
}
# 1093 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xa_release(struct xarray *xa, unsigned long index)
{
xa_cmpxchg(xa, index, xa_mk_internal(257), ((void *)0), 0);
}
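/*
 * Reservation works by installing the zero entry (xa_mk_internal(257))
 * in place of NULL: xa_reserve() and its _bh/_irq variants cmpxchg()
 * NULL -> zero entry, and xa_release() undoes that only if the slot
 * still holds the reservation.
 */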
# 1125 "./include/linux/xarray.h"
struct xa_node {
unsigned char shift;
unsigned char offset;
unsigned char count;
unsigned char nr_values;
struct xa_node *parent;
struct xarray *array;
union {
struct list_head private_list;
struct callback_head callback_head;
};
void *slots[(1UL << (0 ? 4 : 6))];
union {
unsigned long tags[3][((((1UL << (0 ? 4 : 6))) + (64) - 1) / (64))];
unsigned long marks[3][((((1UL << (0 ? 4 : 6))) + (64) - 1) / (64))];
};
};
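/*
 * (1UL << (0 ? 4 : 6)) evaluates to 64: this is the expanded chunk size,
 * apparently 1UL << XA_CHUNK_SHIFT with CONFIG_BASE_SMALL ? 4 : 6. Each
 * node therefore carries 64 slots and one 64-bit word per mark.
 */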

void xa_dump(const struct xarray *);
void xa_dump_node(const struct xa_node *);
# 1165 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xa_head(const struct xarray *xa)
{
return ({ typeof(*(xa->xa_head)) *__UNIQUE_ID_rcu157 = (typeof(*(xa->xa_head)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_158(void) ; if (!((sizeof((xa->xa_head)) == sizeof(char) || sizeof((xa->xa_head)) == sizeof(short) || sizeof((xa->xa_head)) == sizeof(int) || sizeof((xa->xa_head)) == sizeof(long)) || sizeof((xa->xa_head)) == sizeof(long long))) __compiletime_assert_158(); } while (0); (*(const volatile typeof( _Generic(((xa->xa_head)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((xa->xa_head)))) *)&((xa->xa_head))); }); do { } while (0 && (!((lock_is_held(&(&xa->xa_lock)->dep_map)) || rcu_read_lock_held()))); ; ((typeof(*(xa->xa_head)) *)(__UNIQUE_ID_rcu157)); });

}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xa_head_locked(const struct xarray *xa)
{
return ({ do { } while (0 && (!((lock_is_held(&(&xa->xa_lock)->dep_map))))); ; ((typeof(*(xa->xa_head)) *)((xa->xa_head))); });

}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xa_entry(const struct xarray *xa,
const struct xa_node *node, unsigned int offset)
{
do { } while (0);
return ({ typeof(*(node->slots[offset])) *__UNIQUE_ID_rcu159 = (typeof(*(node->slots[offset])) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_160(void) ; if (!((sizeof((node->slots[offset])) == sizeof(char) || sizeof((node->slots[offset])) == sizeof(short) || sizeof((node->slots[offset])) == sizeof(int) || sizeof((node->slots[offset])) == sizeof(long)) || sizeof((node->slots[offset])) == sizeof(long long))) __compiletime_assert_160(); } while (0); (*(const volatile typeof( _Generic(((node->slots[offset])), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((node->slots[offset])))) *)&((node->slots[offset]))); }); do { } while (0 && (!((lock_is_held(&(&xa->xa_lock)->dep_map)) || rcu_read_lock_held()))); ; ((typeof(*(node->slots[offset])) *)(__UNIQUE_ID_rcu159)); });

}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xa_entry_locked(const struct xarray *xa,
const struct xa_node *node, unsigned int offset)
{
do { } while (0);
return ({ do { } while (0 && (!((lock_is_held(&(&xa->xa_lock)->dep_map))))); ; ((typeof(*(node->slots[offset])) *)((node->slots[offset]))); });

}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct xa_node *xa_parent(const struct xarray *xa,
const struct xa_node *node)
{
return ({ typeof(*(node->parent)) *__UNIQUE_ID_rcu161 = (typeof(*(node->parent)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_162(void) ; if (!((sizeof((node->parent)) == sizeof(char) || sizeof((node->parent)) == sizeof(short) || sizeof((node->parent)) == sizeof(int) || sizeof((node->parent)) == sizeof(long)) || sizeof((node->parent)) == sizeof(long long))) __compiletime_assert_162(); } while (0); (*(const volatile typeof( _Generic(((node->parent)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((node->parent)))) *)&((node->parent))); }); do { } while (0 && (!((lock_is_held(&(&xa->xa_lock)->dep_map)) || rcu_read_lock_held()))); ; ((typeof(*(node->parent)) *)(__UNIQUE_ID_rcu161)); });

}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct xa_node *xa_parent_locked(const struct xarray *xa,
const struct xa_node *node)
{
return ({ do { } while (0 && (!((lock_is_held(&(&xa->xa_lock)->dep_map))))); ; ((typeof(*(node->parent)) *)((node->parent))); });

}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xa_mk_node(const struct xa_node *node)
{
return (void *)((unsigned long)node | 2);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct xa_node *xa_to_node(const void *entry)
{
return (struct xa_node *)((unsigned long)entry - 2);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xa_is_node(const void *entry)
{
return xa_is_internal(entry) && (unsigned long)entry > 4096;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xa_mk_sibling(unsigned int offset)
{
return xa_mk_internal(offset);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long xa_to_sibling(const void *entry)
{
return xa_to_internal(entry);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xa_is_sibling(const void *entry)
{
return 1 && xa_is_internal(entry) &&
(entry < xa_mk_sibling((1UL << (0 ? 4 : 6)) - 1));
}
# 1262 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xa_is_retry(const void *entry)
{
return __builtin_expect(!!(entry == xa_mk_internal(256)), 0);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xa_is_advanced(const void *entry)
{
return xa_is_internal(entry) && (entry <= xa_mk_internal(256));
}
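/*
 * xa_mk_internal(256) is the retry entry and 257 the zero entry; the
 * "advanced" predicate above treats everything at or below the retry
 * entry as needing special handling by advanced-API callers.
 */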
# 1290 "./include/linux/xarray.h"
typedef void (*xa_update_node_t)(struct xa_node *node);

void xa_delete_node(struct xa_node *, xa_update_node_t);
# 1311 "./include/linux/xarray.h"
struct xa_state {
struct xarray *xa;
unsigned long xa_index;
unsigned char xa_shift;
unsigned char xa_sibs;
unsigned char xa_offset;
unsigned char xa_pad;
struct xa_node *xa_node;
struct xa_node *xa_alloc;
xa_update_node_t xa_update;
struct list_lru *xa_lru;
};
# 1392 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xas_error(const struct xa_state *xas)
{
return xa_err(xas->xa_node);
}
# 1406 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xas_set_err(struct xa_state *xas, long err)
{
xas->xa_node = ((struct xa_node *)(((unsigned long)err << 2) | 2UL));
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xas_invalid(const struct xa_state *xas)
{
return (unsigned long)xas->xa_node & 3;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xas_valid(const struct xa_state *xas)
{
return !xas_invalid(xas);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xas_is_node(const struct xa_state *xas)
{
return xas_valid(xas) && xas->xa_node;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xas_not_node(struct xa_node *node)
{
return ((unsigned long)node & 3) || !node;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xas_frozen(struct xa_node *node)
{
return (unsigned long)node & 2;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xas_top(struct xa_node *node)
{
return node <= ((struct xa_node *)3UL);
}
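/*
 * The xa_state reuses its xa_node pointer as a tagged scalar: 3UL is the
 * restart sentinel stored by xas_reset() just below, and error states
 * carry (err << 2) | 2 (see xas_set_err() above), so xas_top() and
 * xas_not_node() are testing for those small values, not real nodes.
 */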
# 1472 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xas_reset(struct xa_state *xas)
{
xas->xa_node = ((struct xa_node *)3UL);
}
# 1489 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xas_retry(struct xa_state *xas, const void *entry)
{
if (xa_is_zero(entry))
return true;
if (!xa_is_retry(entry))
return false;
xas_reset(xas);
return true;
}

void *xas_load(struct xa_state *);
void *xas_store(struct xa_state *, void *entry);
void *xas_find(struct xa_state *, unsigned long max);
void *xas_find_conflict(struct xa_state *);

bool xas_get_mark(const struct xa_state *, xa_mark_t);
void xas_set_mark(const struct xa_state *, xa_mark_t);
void xas_clear_mark(const struct xa_state *, xa_mark_t);
void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
void xas_init_marks(const struct xa_state *);

bool xas_nomem(struct xa_state *, gfp_t);
void xas_pause(struct xa_state *);

void xas_create_range(struct xa_state *);


int xa_get_order(struct xarray *, unsigned long index);
void xas_split(struct xa_state *, void *entry, unsigned int order);
void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
# 1551 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xas_reload(struct xa_state *xas)
{
struct xa_node *node = xas->xa_node;
void *entry;
char offset;

if (!node)
return xa_head(xas->xa);
if (1) {
offset = (xas->xa_index >> node->shift) & ((1UL << (0 ? 4 : 6)) - 1);
entry = xa_entry(xas->xa, node, offset);
if (!xa_is_sibling(entry))
return entry;
offset = xa_to_sibling(entry);
} else {
offset = xas->xa_offset;
}
return xa_entry(xas->xa, node, offset);
}
# 1580 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xas_set(struct xa_state *xas, unsigned long index)
{
xas->xa_index = index;
xas->xa_node = ((struct xa_node *)3UL);
}
# 1596 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xas_advance(struct xa_state *xas, unsigned long index)
{
unsigned char shift = xas_is_node(xas) ? xas->xa_node->shift : 0;

xas->xa_index = index;
xas->xa_offset = (index >> shift) & ((1UL << (0 ? 4 : 6)) - 1);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xas_set_order(struct xa_state *xas, unsigned long index,
unsigned int order)
{

xas->xa_index = order < 64 ? (index >> order) << order : 0;
xas->xa_shift = order - (order % (0 ? 4 : 6));
xas->xa_sibs = (1 << (order % (0 ? 4 : 6))) - 1;
xas->xa_node = ((struct xa_node *)3UL);




}
# 1632 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xas_set_update(struct xa_state *xas, xa_update_node_t update)
{
xas->xa_update = update;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xas_set_lru(struct xa_state *xas, struct list_lru *lru)
{
xas->xa_lru = lru;
}
# 1653 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xas_next_entry(struct xa_state *xas, unsigned long max)
{
struct xa_node *node = xas->xa_node;
void *entry;

if (__builtin_expect(!!(xas_not_node(node) || node->shift || xas->xa_offset != (xas->xa_index & ((1UL << (0 ? 4 : 6)) - 1))), 0))

return xas_find(xas, max);

do {
if (__builtin_expect(!!(xas->xa_index >= max), 0))
return xas_find(xas, max);
if (__builtin_expect(!!(xas->xa_offset == ((1UL << (0 ? 4 : 6)) - 1)), 0))
return xas_find(xas, max);
entry = xa_entry(xas->xa, node, xas->xa_offset + 1);
if (__builtin_expect(!!(xa_is_internal(entry)), 0))
return xas_find(xas, max);
xas->xa_offset++;
xas->xa_index++;
} while (!entry);

return entry;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int xas_find_chunk(struct xa_state *xas, bool advance,
xa_mark_t mark)
{
unsigned long *addr = xas->xa_node->marks[( unsigned)mark];
unsigned int offset = xas->xa_offset;

if (advance)
offset++;
if ((1UL << (0 ? 4 : 6)) == 64) {
if (offset < (1UL << (0 ? 4 : 6))) {
unsigned long data = *addr & (~0UL << offset);
if (data)
return __ffs(data);
}
return (1UL << (0 ? 4 : 6));
}

return find_next_bit(addr, (1UL << (0 ? 4 : 6)), offset);
}
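/*
 * Because the chunk size equals BITS_PER_LONG (64) here, a node's marks
 * fit in a single word: the scan masks off bits below the current offset
 * and uses __ffs() to find the next set mark; the find_next_bit() branch
 * is dead code kept from the smaller-chunk configuration.
 */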
# 1710 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xas_next_marked(struct xa_state *xas, unsigned long max,
xa_mark_t mark)
{
struct xa_node *node = xas->xa_node;
void *entry;
unsigned int offset;

if (__builtin_expect(!!(xas_not_node(node) || node->shift), 0))
return xas_find_marked(xas, max, mark);
offset = xas_find_chunk(xas, true, mark);
xas->xa_offset = offset;
xas->xa_index = (xas->xa_index & ~((1UL << (0 ? 4 : 6)) - 1)) + offset;
if (xas->xa_index > max)
return ((void *)0);
if (offset == (1UL << (0 ? 4 : 6)))
return xas_find_marked(xas, max, mark);
entry = xa_entry(xas->xa, node, offset);
if (!entry)
return xas_find_marked(xas, max, mark);
return entry;
}





enum {
XA_CHECK_SCHED = 4096,
};
# 1790 "./include/linux/xarray.h"
void *__xas_next(struct xa_state *);
void *__xas_prev(struct xa_state *);
# 1809 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xas_prev(struct xa_state *xas)
{
struct xa_node *node = xas->xa_node;

if (__builtin_expect(!!(xas_not_node(node) || node->shift || xas->xa_offset == 0), 0))

return __xas_prev(xas);

xas->xa_index--;
xas->xa_offset--;
return xa_entry(xas->xa, node, xas->xa_offset);
}
# 1838 "./include/linux/xarray.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xas_next(struct xa_state *xas)
{
struct xa_node *node = xas->xa_node;

if (__builtin_expect(!!(xas_not_node(node) || node->shift || xas->xa_offset == ((1UL << (0 ? 4 : 6)) - 1)), 0))

return __xas_next(xas);

xas->xa_index++;
xas->xa_offset++;
return xa_entry(xas->xa, node, xas->xa_offset);
}
# 15 "./include/linux/list_lru.h" 2

struct mem_cgroup;


enum lru_status {
LRU_REMOVED,
LRU_REMOVED_RETRY,

LRU_ROTATE,
LRU_SKIP,
LRU_RETRY,

};

struct list_lru_one {
struct list_head list;

long nr_items;
};

struct list_lru_memcg {
struct callback_head rcu;

struct list_lru_one node[];
};

struct list_lru_node {

spinlock_t lock;

struct list_lru_one lru;
long nr_items;
} __attribute__((__aligned__((1 << 6))));

struct list_lru {
struct list_lru_node *node;






};

void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
struct lock_class_key *key, struct shrinker *shrinker);
# 70 "./include/linux/list_lru.h"
int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
gfp_t gfp);
void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent);
# 90 "./include/linux/list_lru.h"
bool list_lru_add(struct list_lru *lru, struct list_head *item);
# 103 "./include/linux/list_lru.h"
bool list_lru_del(struct list_lru *lru, struct list_head *item);
# 115 "./include/linux/list_lru.h"
unsigned long list_lru_count_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg);
unsigned long list_lru_count_node(struct list_lru *lru, int nid);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long list_lru_shrink_count(struct list_lru *lru,
struct shrink_control *sc)
{
return list_lru_count_one(lru, sc->nid, sc->memcg);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long list_lru_count(struct list_lru *lru)
{
long count = 0;
int nid;

for ( (nid) = 0; (nid) == 0; (nid) = 1)
count += list_lru_count_node(lru, nid);

return count;
}
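/*
 * for ((nid) = 0; (nid) == 0; (nid) = 1) is the degenerate expansion of
 * for_each_node() on a !CONFIG_NUMA build: it visits node 0 exactly once.
 */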

void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
struct list_head *head);

typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
# 165 "./include/linux/list_lru.h"
unsigned long list_lru_walk_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk);
# 182 "./include/linux/list_lru.h"
unsigned long list_lru_walk_one_irq(struct list_lru *lru,
int nid, struct mem_cgroup *memcg,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long
list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
list_lru_walk_cb isolate, void *cb_arg)
{
return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
&sc->nr_to_scan);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long
list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc,
list_lru_walk_cb isolate, void *cb_arg)
{
return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
&sc->nr_to_scan);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
void *cb_arg, unsigned long nr_to_walk)
{
long isolated = 0;
int nid;

for ( (nid) = 0; (nid) == 0; (nid) = 1) {
isolated += list_lru_walk_node(lru, nid, isolate,
cb_arg, &nr_to_walk);
if (nr_to_walk <= 0)
break;
}
return isolated;
}
# 14 "./include/linux/fs.h" 2

# 1 "./include/linux/radix-tree.h" 1
# 28 "./include/linux/radix-tree.h"
struct radix_tree_preload {
local_lock_t lock;
unsigned nr;

struct xa_node *nodes;
};
extern __attribute__((section(".data..percpu" ""))) __typeof__(struct radix_tree_preload) radix_tree_preloads;
# 55 "./include/linux/radix-tree.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool radix_tree_is_internal_node(void *ptr)
{
return ((unsigned long)ptr & 3UL) ==
2UL;
}
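/*
 * The radix tree is nowadays a thin layer over the xarray: "internal"
 * here is the same low-bits-10 tag tested by xa_is_internal() earlier,
 * which is why the root type throughout is struct xarray.
 */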
# 86 "./include/linux/radix-tree.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool radix_tree_empty(const struct xarray *root)
{
return root->xa_head == ((void *)0);
}
# 106 "./include/linux/radix-tree.h"
struct radix_tree_iter {
unsigned long index;
unsigned long next_index;
unsigned long tags;
struct xa_node *node;
};
# 177 "./include/linux/radix-tree.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *radix_tree_deref_slot(void **slot)
{
return ({ typeof(*(*slot)) *__UNIQUE_ID_rcu163 = (typeof(*(*slot)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_164(void) ; if (!((sizeof((*slot)) == sizeof(char) || sizeof((*slot)) == sizeof(short) || sizeof((*slot)) == sizeof(int) || sizeof((*slot)) == sizeof(long)) || sizeof((*slot)) == sizeof(long long))) __compiletime_assert_164(); } while (0); (*(const volatile typeof( _Generic(((*slot)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((*slot)))) *)&((*slot))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(*slot)) *)(__UNIQUE_ID_rcu163)); });
}
# 191 "./include/linux/radix-tree.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *radix_tree_deref_slot_protected(void **slot,
spinlock_t *treelock)
{
return ({ do { } while (0 && (!((lock_is_held(&(treelock)->dep_map))))); ; ((typeof(*(*slot)) *)((*slot))); });
}
# 204 "./include/linux/radix-tree.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int radix_tree_deref_retry(void *arg)
{
return __builtin_expect(!!(radix_tree_is_internal_node(arg)), 0);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int radix_tree_exception(void *arg)
{
return __builtin_expect(!!((unsigned long)arg & 3UL), 0);
}

int radix_tree_insert(struct xarray *, unsigned long index,
void *);
void *__radix_tree_lookup(const struct xarray *, unsigned long index,
struct xa_node **nodep, void ***slotp);
void *radix_tree_lookup(const struct xarray *, unsigned long);
void **radix_tree_lookup_slot(const struct xarray *,
unsigned long index);
void __radix_tree_replace(struct xarray *, struct xa_node *,
void **slot, void *entry);
void radix_tree_iter_replace(struct xarray *,
const struct radix_tree_iter *, void **slot, void *entry);
void radix_tree_replace_slot(struct xarray *,
void **slot, void *entry);
void radix_tree_iter_delete(struct xarray *,
struct radix_tree_iter *iter, void **slot);
void *radix_tree_delete_item(struct xarray *, unsigned long, void *);
void *radix_tree_delete(struct xarray *, unsigned long);
unsigned int radix_tree_gang_lookup(const struct xarray *,
void **results, unsigned long first_index,
unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct xarray *,
unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct xarray *,
unsigned long index, unsigned int tag);
int radix_tree_tag_get(const struct xarray *,
unsigned long index, unsigned int tag);
void radix_tree_iter_tag_clear(struct xarray *,
const struct radix_tree_iter *iter, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag(const struct xarray *,
void **results, unsigned long first_index,
unsigned int max_items, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag_slot(const struct xarray *,
void ***results, unsigned long first_index,
unsigned int max_items, unsigned int tag);
int radix_tree_tagged(const struct xarray *, unsigned int tag);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void radix_tree_preload_end(void)
{
do { local_lock_release(({ do { const void *__vpp_verify = (typeof((&radix_tree_preloads.lock) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&radix_tree_preloads.lock)) *)(&radix_tree_preloads.lock)); (typeof((typeof(*(&radix_tree_preloads.lock)) *)(&radix_tree_preloads.lock))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })); do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0);
}

void **idr_get_free(struct xarray *root,
struct radix_tree_iter *iter, gfp_t gfp,
unsigned long max);

enum {
RADIX_TREE_ITER_TAG_MASK = 0x0f,
RADIX_TREE_ITER_TAGGED = 0x10,
RADIX_TREE_ITER_CONTIG = 0x20,
};
# 280 "./include/linux/radix-tree.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
# 291 "./include/linux/radix-tree.h"
iter->index = 0;
iter->next_index = start;
return ((void *)0);
}
# 309 "./include/linux/radix-tree.h"
void **radix_tree_next_chunk(const struct xarray *,
struct radix_tree_iter *iter, unsigned flags);
# 322 "./include/linux/radix-tree.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void **
radix_tree_iter_lookup(const struct xarray *root,
struct radix_tree_iter *iter, unsigned long index)
{
radix_tree_iter_init(iter, index);
return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
}
# 339 "./include/linux/radix-tree.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__))
void **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
iter->next_index = iter->index;
iter->tags = 0;
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
{
return iter->index + slots;
}
# 363 "./include/linux/radix-tree.h"
void **__attribute__((__warn_unused_result__)) radix_tree_iter_resume(void **slot,
struct radix_tree_iter *iter);







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
return iter->next_index - iter->index;
}
# 397 "./include/linux/radix-tree.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void **radix_tree_next_slot(void **slot,
struct radix_tree_iter *iter, unsigned flags)
{
if (flags & RADIX_TREE_ITER_TAGGED) {
iter->tags >>= 1;
if (__builtin_expect(!!(!iter->tags), 0))
return ((void *)0);
if (__builtin_expect(!!(iter->tags & 1ul), 1)) {
iter->index = __radix_tree_iter_add(iter, 1);
slot++;
goto found;
}
if (!(flags & RADIX_TREE_ITER_CONTIG)) {
unsigned offset = __ffs(iter->tags);

iter->tags >>= offset++;
iter->index = __radix_tree_iter_add(iter, offset);
slot += offset;
goto found;
}
} else {
long count = radix_tree_chunk_size(iter);

while (--count > 0) {
slot++;
iter->index = __radix_tree_iter_add(iter, 1);

if (__builtin_expect(!!(*slot), 1))
goto found;
if (flags & RADIX_TREE_ITER_CONTIG) {

iter->next_index = 0;
break;
}
}
}
return ((void *)0);

found:
return slot;
}
# 16 "./include/linux/fs.h" 2



# 1 "./include/linux/pid.h" 1








enum pid_type
{
PIDTYPE_PID,
PIDTYPE_TGID,
PIDTYPE_PGID,
PIDTYPE_SID,
PIDTYPE_MAX,
};
# 54 "./include/linux/pid.h"
struct upid {
int nr;
struct pid_namespace *ns;
};

struct pid
{
refcount_t count;
unsigned int level;
spinlock_t lock;

struct hlist_head tasks[PIDTYPE_MAX];
struct hlist_head inodes;

wait_queue_head_t wait_pidfd;
struct callback_head rcu;
struct upid numbers[1];
};

extern struct pid init_struct_pid;

extern const struct file_operations pidfd_fops;

struct file;

extern struct pid *pidfd_pid(const struct file *file);
struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags);
struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags);
int pidfd_create(struct pid *pid, unsigned int flags);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct pid *get_pid(struct pid *pid)
{
if (pid)
refcount_inc(&pid->count);
return pid;
}

extern void put_pid(struct pid *pid);
extern struct task_struct *pid_task(struct pid *pid, enum pid_type);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool pid_has_task(struct pid *pid, enum pid_type type)
{
return !hlist_empty(&pid->tasks[type]);
}
extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type);

extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);




extern void attach_pid(struct task_struct *task, enum pid_type);
extern void detach_pid(struct task_struct *task, enum pid_type);
extern void change_pid(struct task_struct *task, enum pid_type,
struct pid *pid);
extern void exchange_tids(struct task_struct *task, struct task_struct *old);
extern void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type);

struct pid_namespace;
extern struct pid_namespace init_pid_ns;

extern int pid_max;
extern int pid_max_min, pid_max_max;
# 127 "./include/linux/pid.h"
extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns);
extern struct pid *find_vpid(int nr);




extern struct pid *find_get_pid(int nr);
extern struct pid *find_ge_pid(int nr, struct pid_namespace *);

extern struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
size_t set_tid_size);
extern void free_pid(struct pid *pid);
extern void disable_pid_allocation(struct pid_namespace *ns);
# 151 "./include/linux/pid.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct pid_namespace *ns_of_pid(struct pid *pid)
{
struct pid_namespace *ns = ((void *)0);
if (pid)
ns = pid->numbers[pid->level].ns;
return ns;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_child_reaper(struct pid *pid)
{
return pid->numbers[pid->level].nr == 1;
}
# 181 "./include/linux/pid.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pid_t pid_nr(struct pid *pid)
{
pid_t nr = 0;
if (pid)
nr = pid->numbers[0].nr;
return nr;
}

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns);
pid_t pid_vnr(struct pid *pid);
# 20 "./include/linux/fs.h" 2





# 1 "./include/linux/semaphore.h" 1
# 15 "./include/linux/semaphore.h"
struct semaphore {
raw_spinlock_t lock;
unsigned int count;
struct list_head wait_list;
};
# 31 "./include/linux/semaphore.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sema_init(struct semaphore *sem, int val)
{
static struct lock_class_key __key;
*sem = (struct semaphore) {
.lock = (raw_spinlock_t) {
.raw_lock = { 0 },
.magic = 0xdead4ead,
.owner_cpu = -1,
.owner = ((void *)-1L),
.dep_map = { .name = "(*sem).lock", .wait_type_inner = LD_WAIT_SPIN, }
},
.count = val,
.wait_list = { &((*sem).wait_list), &((*sem).wait_list) },
};
lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0);
}

extern void down(struct semaphore *sem);
extern int __attribute__((__warn_unused_result__)) down_interruptible(struct semaphore *sem);
extern int __attribute__((__warn_unused_result__)) down_killable(struct semaphore *sem);
extern int __attribute__((__warn_unused_result__)) down_trylock(struct semaphore *sem);
extern int __attribute__((__warn_unused_result__)) down_timeout(struct semaphore *sem, long jiffies);
extern void up(struct semaphore *sem);
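/*
 * Classic counting-semaphore API. A minimal sketch (hypothetical caller,
 * process context):
 *
 *	struct semaphore sem;
 *	sema_init(&sem, 1);
 *	if (down_interruptible(&sem))
 *		return -EINTR;
 *	... critical section ...
 *	up(&sem);
 */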
# 26 "./include/linux/fs.h" 2




# 1 "./include/linux/migrate_mode.h" 1
# 15 "./include/linux/migrate_mode.h"
enum migrate_mode {
MIGRATE_ASYNC,
MIGRATE_SYNC_LIGHT,
MIGRATE_SYNC,
MIGRATE_SYNC_NO_COPY,
};

enum migrate_reason {
MR_COMPACTION,
MR_MEMORY_FAILURE,
MR_MEMORY_HOTPLUG,
MR_SYSCALL,
MR_MEMPOLICY_MBIND,
MR_NUMA_MISPLACED,
MR_CONTIG_RANGE,
MR_LONGTERM_PIN,
MR_DEMOTION,
MR_TYPES
};
# 31 "./include/linux/fs.h" 2


# 1 "./include/linux/percpu-rwsem.h" 1






# 1 "./include/linux/rcuwait.h" 1





# 1 "./include/linux/sched/signal.h" 1





# 1 "./include/linux/signal.h" 1





# 1 "./include/linux/signal_types.h" 1
# 10 "./include/linux/signal_types.h"
# 1 "./include/uapi/linux/signal.h" 1




# 1 "./arch/riscv/include/generated/uapi/asm/signal.h" 1
# 1 "./include/asm-generic/signal.h" 1




# 1 "./include/uapi/asm-generic/signal.h" 1
# 61 "./include/uapi/asm-generic/signal.h"
typedef struct {
unsigned long sig[(64 / (8 * 8))];
} sigset_t;


typedef unsigned long old_sigset_t;


# 1 "./include/uapi/asm-generic/signal-defs.h" 1
# 82 "./include/uapi/asm-generic/signal-defs.h"
typedef void __signalfn_t(int);
typedef __signalfn_t *__sighandler_t;

typedef void __restorefn_t(void);
typedef __restorefn_t *__sigrestore_t;
# 69 "./include/uapi/asm-generic/signal.h" 2
# 85 "./include/uapi/asm-generic/signal.h"
typedef struct sigaltstack {
void *ss_sp;
int ss_flags;
__kernel_size_t ss_size;
} stack_t;
# 6 "./include/asm-generic/signal.h" 2





# 1 "./arch/riscv/include/uapi/asm/sigcontext.h" 1
# 17 "./arch/riscv/include/uapi/asm/sigcontext.h"
struct sigcontext {
struct user_regs_struct sc_regs;
union __riscv_fp_state sc_fpregs;
};
# 12 "./include/asm-generic/signal.h" 2
# 2 "./arch/riscv/include/generated/uapi/asm/signal.h" 2
# 6 "./include/uapi/linux/signal.h" 2
# 1 "./arch/riscv/include/generated/uapi/asm/siginfo.h" 1
# 1 "./include/uapi/asm-generic/siginfo.h" 1







typedef union sigval {
int sival_int;
void *sival_ptr;
} sigval_t;
# 37 "./include/uapi/asm-generic/siginfo.h"
union __sifields {

struct {
__kernel_pid_t _pid;
__kernel_uid32_t _uid;
} _kill;


struct {
__kernel_timer_t _tid;
int _overrun;
sigval_t _sigval;
int _sys_private;
} _timer;


struct {
__kernel_pid_t _pid;
__kernel_uid32_t _uid;
sigval_t _sigval;
} _rt;


struct {
__kernel_pid_t _pid;
__kernel_uid32_t _uid;
int _status;
__kernel_clock_t _utime;
__kernel_clock_t _stime;
} _sigchld;


struct {
void *_addr;
# 79 "./include/uapi/asm-generic/siginfo.h"
union {

int _trapno;




short _addr_lsb;

struct {
char _dummy_bnd[(__alignof__(void *) < sizeof(short) ? sizeof(short) : __alignof__(void *))];
void *_lower;
void *_upper;
} _addr_bnd;

struct {
char _dummy_pkey[(__alignof__(void *) < sizeof(short) ? sizeof(short) : __alignof__(void *))];
__u32 _pkey;
} _addr_pkey;

struct {
unsigned long _data;
__u32 _type;
} _perf;
};
} _sigfault;


struct {
long _band;
int _fd;
} _sigpoll;


struct {
void *_call_addr;
int _syscall;
unsigned int _arch;
} _sigsys;
};
# 138 "./include/uapi/asm-generic/siginfo.h"
typedef struct siginfo {
union {
struct { int si_signo; int si_errno; int si_code; union __sifields _sifields; };
int _si_pad[128/sizeof(int)];
};
} siginfo_t;
# 333 "./include/uapi/asm-generic/siginfo.h"
typedef struct sigevent {
sigval_t sigev_value;
int sigev_signo;
int sigev_notify;
union {
int _pad[((64 - (sizeof(int) * 2 + sizeof(sigval_t))) / sizeof(int))];
int _tid;

struct {
void (*_function)(sigval_t);
void *_attribute;
} _sigev_thread;
} _sigev_un;
} sigevent_t;
# 2 "./arch/riscv/include/generated/uapi/asm/siginfo.h" 2
# 7 "./include/uapi/linux/signal.h" 2
# 11 "./include/linux/signal_types.h" 2

typedef struct kernel_siginfo {
struct { int si_signo; int si_errno; int si_code; union __sifields _sifields; };
} kernel_siginfo_t;

struct ucounts;





struct sigqueue {
struct list_head list;
int flags;
kernel_siginfo_t info;
struct ucounts *ucounts;
};




struct sigpending {
struct list_head list;
sigset_t signal;
};

struct sigaction {

__sighandler_t sa_handler;
unsigned long sa_flags;







sigset_t sa_mask;
};

struct k_sigaction {
struct sigaction sa;



};
# 67 "./include/linux/signal_types.h"
struct ksignal {
struct k_sigaction ka;
kernel_siginfo_t info;
int sig;
};
# 7 "./include/linux/signal.h" 2


struct task_struct;


extern int print_fatal_signals;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void copy_siginfo(kernel_siginfo_t *to,
const kernel_siginfo_t *from)
{
memcpy(to, from, sizeof(*to));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_siginfo(kernel_siginfo_t *info)
{
memset(info, 0, sizeof(*info));
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void copy_siginfo_to_external(siginfo_t *to,
const kernel_siginfo_t *from)
{
memcpy(to, from, sizeof(*from));
memset(((char *)to) + sizeof(struct kernel_siginfo), 0,
(sizeof(struct siginfo) - sizeof(struct kernel_siginfo)));
}
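/*
 * kernel_siginfo is the kernel-internal prefix of the fixed 128-byte
 * userspace siginfo (note the _si_pad sizing above); the trailing
 * memset() zero-fills the difference so no kernel stack bytes leak
 * to userspace.
 */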

int copy_siginfo_to_user(siginfo_t *to, const kernel_siginfo_t *from);
int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t *from);

enum siginfo_layout {
SIL_KILL,
SIL_TIMER,
SIL_POLL,
SIL_FAULT,
SIL_FAULT_TRAPNO,
SIL_FAULT_MCEERR,
SIL_FAULT_BNDERR,
SIL_FAULT_PKUERR,
SIL_FAULT_PERF_EVENT,
SIL_CHLD,
SIL_RT,
SIL_SYS,
};

enum siginfo_layout siginfo_layout(unsigned sig, int si_code);
# 64 "./include/linux/signal.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sigaddset(sigset_t *set, int _sig)
{
unsigned long sig = _sig - 1;
if ((64 / (8 * 8)) == 1)
set->sig[0] |= 1UL << sig;
else
set->sig[sig / (8 * 8)] |= 1UL << (sig % (8 * 8));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sigdelset(sigset_t *set, int _sig)
{
unsigned long sig = _sig - 1;
if ((64 / (8 * 8)) == 1)
set->sig[0] &= ~(1UL << sig);
else
set->sig[sig / (8 * 8)] &= ~(1UL << (sig % (8 * 8)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sigismember(sigset_t *set, int _sig)
{
unsigned long sig = _sig - 1;
if ((64 / (8 * 8)) == 1)
return 1 & (set->sig[0] >> sig);
else
return 1 & (set->sig[sig / (8 * 8)] >> (sig % (8 * 8)));
}
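/*
 * With 64 signals and 64-bit longs, (64 / (8 * 8)) == 1, so only the
 * single-word branches of these helpers are ever taken; the multi-word
 * cases survive from configurations with a larger _NSIG.
 */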



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sigisemptyset(sigset_t *set)
{
switch ((64 / (8 * 8))) {
case 4:
return (set->sig[3] | set->sig[2] |
set->sig[1] | set->sig[0]) == 0;
case 2:
return (set->sig[1] | set->sig[0]) == 0;
case 1:
return set->sig[0] == 0;
default:
do { __attribute__((__noreturn__)) extern void __compiletime_assert_165(void) ; if (!(!(1))) __compiletime_assert_165(); } while (0);
return 0;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sigequalsets(const sigset_t *set1, const sigset_t *set2)
{
switch ((64 / (8 * 8))) {
case 4:
return (set1->sig[3] == set2->sig[3]) &&
(set1->sig[2] == set2->sig[2]) &&
(set1->sig[1] == set2->sig[1]) &&
(set1->sig[0] == set2->sig[0]);
case 2:
return (set1->sig[1] == set2->sig[1]) &&
(set1->sig[0] == set2->sig[0]);
case 1:
return set1->sig[0] == set2->sig[0];
}
return 0;
}
# 156 "./include/linux/signal.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sigorsets(sigset_t *r, const sigset_t *a, const sigset_t *b)
{
unsigned long a0, a1, a2, a3, b0, b1, b2, b3;
switch ((64 / (8 * 8))) {
case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2];
r->sig[3] = ((a3) | (b3)); r->sig[2] = ((a2) | (b2));
__attribute__((__fallthrough__));
case 2: a1 = a->sig[1]; b1 = b->sig[1];
r->sig[1] = ((a1) | (b1));
__attribute__((__fallthrough__));
case 1: a0 = a->sig[0]; b0 = b->sig[0];
r->sig[0] = ((a0) | (b0));
break;
default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_166(void) ; if (!(!(1))) __compiletime_assert_166(); } while (0);
}
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sigandsets(sigset_t *r, const sigset_t *a, const sigset_t *b)
{
unsigned long a0, a1, a2, a3, b0, b1, b2, b3;
switch ((64 / (8 * 8))) {
case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2];
r->sig[3] = ((a3) & (b3)); r->sig[2] = ((a2) & (b2));
__attribute__((__fallthrough__));
case 2: a1 = a->sig[1]; b1 = b->sig[1];
r->sig[1] = ((a1) & (b1));
__attribute__((__fallthrough__));
case 1: a0 = a->sig[0]; b0 = b->sig[0];
r->sig[0] = ((a0) & (b0));
break;
default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_167(void) ; if (!(!(1))) __compiletime_assert_167(); } while (0);
}
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sigandnsets(sigset_t *r, const sigset_t *a, const sigset_t *b)
{
unsigned long a0, a1, a2, a3, b0, b1, b2, b3;
switch ((64 / (8 * 8))) {
case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2];
r->sig[3] = ((a3) & ~(b3)); r->sig[2] = ((a2) & ~(b2));
__attribute__((__fallthrough__));
case 2: a1 = a->sig[1]; b1 = b->sig[1];
r->sig[1] = ((a1) & ~(b1));
__attribute__((__fallthrough__));
case 1: a0 = a->sig[0]; b0 = b->sig[0];
r->sig[0] = ((a0) & ~(b0));
break;
default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_168(void) ; if (!(!(1))) __compiletime_assert_168(); } while (0);
}
}
# 186 "./include/linux/signal.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void signotset(sigset_t *set)
{
switch ((64 / (8 * 8))) {
case 4: set->sig[3] = (~(set->sig[3])); set->sig[2] = (~(set->sig[2]));
__attribute__((__fallthrough__));
case 2: set->sig[1] = (~(set->sig[1]));
__attribute__((__fallthrough__));
case 1: set->sig[0] = (~(set->sig[0]));
break;
default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_169(void) ; if (!(!(1))) __compiletime_assert_169(); } while (0);
}
}
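/*
 * sigorsets()/sigandsets()/sigandnsets()/signotset() above are expansions
 * of the sigset binop/unop generator macros (apparently _SIG_SET_BINOP()
 * and _SIG_SET_OP() from linux/signal.h); the unreachable default arms
 * are the expanded BUILD_BUG() for unexpected word counts.
 */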




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sigemptyset(sigset_t *set)
{
switch ((64 / (8 * 8))) {
default:
memset(set, 0, sizeof(sigset_t));
break;
case 2: set->sig[1] = 0;
__attribute__((__fallthrough__));
case 1: set->sig[0] = 0;
break;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sigfillset(sigset_t *set)
{
switch ((64 / (8 * 8))) {
default:
memset(set, -1, sizeof(sigset_t));
break;
case 2: set->sig[1] = -1;
__attribute__((__fallthrough__));
case 1: set->sig[0] = -1;
break;
}
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sigaddsetmask(sigset_t *set, unsigned long mask)
{
set->sig[0] |= mask;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sigdelsetmask(sigset_t *set, unsigned long mask)
{
set->sig[0] &= ~mask;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sigtestsetmask(sigset_t *set, unsigned long mask)
{
return (set->sig[0] & mask) != 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void siginitset(sigset_t *set, unsigned long mask)
{
set->sig[0] = mask;
switch ((64 / (8 * 8))) {
default:
memset(&set->sig[1], 0, sizeof(long)*((64 / (8 * 8))-1));
break;
case 2: set->sig[1] = 0;
break;
case 1: ;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void siginitsetinv(sigset_t *set, unsigned long mask)
{
set->sig[0] = ~mask;
switch ((64 / (8 * 8))) {
default:
memset(&set->sig[1], -1, sizeof(long)*((64 / (8 * 8))-1));
break;
case 2: set->sig[1] = -1;
break;
case 1: ;
}
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void init_sigpending(struct sigpending *sig)
{
sigemptyset(&sig->signal);
INIT_LIST_HEAD(&sig->list);
}

extern void flush_sigqueue(struct sigpending *queue);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int valid_signal(unsigned long sig)
{
return sig <= 64 ? 1 : 0;
}
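/*
 * Note: 64 here is the expansion of _NSIG. Taking the signal number as
 * unsigned long means negative callers wrap to huge values and are
 * rejected, while 0 is accepted (it is used for existence checks, as in
 * kill(pid, 0)).
 */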

struct timespec;
struct pt_regs;
enum pid_type;

extern int next_signal(struct sigpending *pending, sigset_t *mask);
extern int do_send_sig_info(int sig, struct kernel_siginfo *info,
struct task_struct *p, enum pid_type type);
extern int group_send_sig_info(int sig, struct kernel_siginfo *info,
struct task_struct *p, enum pid_type type);
extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern int sigprocmask(int, sigset_t *, sigset_t *);
extern void set_current_blocked(sigset_t *);
extern void __set_current_blocked(const sigset_t *);
extern int show_unhandled_signals;

extern bool get_signal(struct ksignal *ksig);
extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping);
extern void exit_signals(struct task_struct *tsk);
extern void kernel_sigaction(int, __sighandler_t);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void allow_signal(int sig)
{





kernel_sigaction(sig, (( __sighandler_t)2));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void allow_kernel_signal(int sig)
{





kernel_sigaction(sig, (( __sighandler_t)3));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void disallow_signal(int sig)
{
kernel_sigaction(sig, (( __sighandler_t)1));
}

extern struct kmem_cache *sighand_cachep;

extern bool unhandled_signal(struct task_struct *tsk, int sig);
# 453 "./include/linux/signal.h"
void signals_init(void);

int restore_altstack(const stack_t *);
int __save_altstack(stack_t *, unsigned long);
# 469 "./include/linux/signal.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sigaltstack_size_valid(size_t size) { return true; }



struct seq_file;
extern void render_sigset_t(struct seq_file *, const char *, sigset_t *);
# 483 "./include/linux/signal.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *arch_untagged_si_addr(void *addr,
unsigned long sig,
unsigned long si_code)
{
return addr;
}
# 7 "./include/linux/sched/signal.h" 2
# 1 "./include/linux/sched.h" 1
# 10 "./include/linux/sched.h"
# 1 "./include/uapi/linux/sched.h" 1
# 92 "./include/uapi/linux/sched.h"
struct clone_args {
__u64 __attribute__((aligned(8))) flags;
__u64 __attribute__((aligned(8))) pidfd;
__u64 __attribute__((aligned(8))) child_tid;
__u64 __attribute__((aligned(8))) parent_tid;
__u64 __attribute__((aligned(8))) exit_signal;
__u64 __attribute__((aligned(8))) stack;
__u64 __attribute__((aligned(8))) stack_size;
__u64 __attribute__((aligned(8))) tls;
__u64 __attribute__((aligned(8))) set_tid;
__u64 __attribute__((aligned(8))) set_tid_size;
__u64 __attribute__((aligned(8))) cgroup;
};
# 11 "./include/linux/sched.h" 2




# 1 "./include/linux/sem.h" 1




# 1 "./include/uapi/linux/sem.h" 1




# 1 "./include/linux/ipc.h" 1






# 1 "./include/linux/rhashtable-types.h" 1
# 17 "./include/linux/rhashtable-types.h"
struct rhash_head {
struct rhash_head *next;
};

struct rhlist_head {
struct rhash_head rhead;
struct rhlist_head *next;
};

struct bucket_table;






struct rhashtable_compare_arg {
struct rhashtable *ht;
const void *key;
};

typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
const void *obj);
# 56 "./include/linux/rhashtable-types.h"
struct rhashtable_params {
u16 nelem_hint;
u16 key_len;
u16 key_offset;
u16 head_offset;
unsigned int max_size;
u16 min_size;
bool automatic_shrinking;
rht_hashfn_t hashfn;
rht_obj_hashfn_t obj_hashfn;
rht_obj_cmpfn_t obj_cmpfn;
};
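/*
 * Illustrative sketch (struct __editor_example_obj and the params table
 * are hypothetical, added for illustration): a minimal parameter block
 * keying a table on a u32 embedded in the object, with the rhash_head
 * linkage beside it. Leaving hashfn/obj_hashfn NULL is expected to
 * select rhashtable's default jhash-based hashing.
 */
struct __editor_example_obj {
u32 key;
struct rhash_head linkage;
};

static const struct rhashtable_params __editor_example_params __attribute__((__unused__)) = {
.key_len = sizeof(u32),
.key_offset = __builtin_offsetof(struct __editor_example_obj, key),
.head_offset = __builtin_offsetof(struct __editor_example_obj, linkage),
};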
# 81 "./include/linux/rhashtable-types.h"
struct rhashtable {
struct bucket_table *tbl;
unsigned int key_len;
unsigned int max_elems;
struct rhashtable_params p;
bool rhlist;
struct work_struct run_work;
struct mutex mutex;
spinlock_t lock;
atomic_t nelems;
};





struct rhltable {
struct rhashtable ht;
};






struct rhashtable_walker {
struct list_head list;
struct bucket_table *tbl;
};
# 120 "./include/linux/rhashtable-types.h"
struct rhashtable_iter {
struct rhashtable *ht;
struct rhash_head *p;
struct rhlist_head *list;
struct rhashtable_walker walker;
unsigned int slot;
unsigned int skip;
bool end_of_table;
};

int rhashtable_init(struct rhashtable *ht,
const struct rhashtable_params *params);
int rhltable_init(struct rhltable *hlt,
const struct rhashtable_params *params);
# 8 "./include/linux/ipc.h" 2
# 1 "./include/uapi/linux/ipc.h" 1
# 10 "./include/uapi/linux/ipc.h"
struct ipc_perm
{
__kernel_key_t key;
__kernel_uid_t uid;
__kernel_gid_t gid;
__kernel_uid_t cuid;
__kernel_gid_t cgid;
__kernel_mode_t mode;
unsigned short seq;
};



# 1 "./arch/riscv/include/generated/uapi/asm/ipcbuf.h" 1
# 1 "./include/uapi/asm-generic/ipcbuf.h" 1
# 22 "./include/uapi/asm-generic/ipcbuf.h"
struct ipc64_perm {
__kernel_key_t key;
__kernel_uid32_t uid;
__kernel_gid32_t gid;
__kernel_uid32_t cuid;
__kernel_gid32_t cgid;
__kernel_mode_t mode;

unsigned char __pad1[4 - sizeof(__kernel_mode_t)];
unsigned short seq;
unsigned short __pad2;
__kernel_ulong_t __unused1;
__kernel_ulong_t __unused2;
};
# 2 "./arch/riscv/include/generated/uapi/asm/ipcbuf.h" 2
# 23 "./include/uapi/linux/ipc.h" 2
# 58 "./include/uapi/linux/ipc.h"
struct ipc_kludge {
struct msgbuf *msgp;
long msgtyp;
};
# 9 "./include/linux/ipc.h" 2



struct kern_ipc_perm {
spinlock_t lock;
bool deleted;
int id;
key_t key;
kuid_t uid;
kgid_t gid;
kuid_t cuid;
kgid_t cgid;
umode_t mode;
unsigned long seq;
void *security;

struct rhash_head khtnode;

struct callback_head rcu;
refcount_t refcount;
} __attribute__((__aligned__((1 << 6)))) ;
# 6 "./include/uapi/linux/sem.h" 2
# 25 "./include/uapi/linux/sem.h"
struct semid_ds {
struct ipc_perm sem_perm;
__kernel_old_time_t sem_otime;
__kernel_old_time_t sem_ctime;
struct sem *sem_base;
struct sem_queue *sem_pending;
struct sem_queue **sem_pending_last;
struct sem_undo *undo;
unsigned short sem_nsems;
};



# 1 "./arch/riscv/include/generated/uapi/asm/sembuf.h" 1
# 1 "./include/uapi/asm-generic/sembuf.h" 1





# 1 "./arch/riscv/include/generated/uapi/asm/ipcbuf.h" 1
# 7 "./include/uapi/asm-generic/sembuf.h" 2
# 29 "./include/uapi/asm-generic/sembuf.h"
struct semid64_ds {
struct ipc64_perm sem_perm;

long sem_otime;
long sem_ctime;






unsigned long sem_nsems;
unsigned long __unused3;
unsigned long __unused4;
};
# 2 "./arch/riscv/include/generated/uapi/asm/sembuf.h" 2
# 38 "./include/uapi/linux/sem.h" 2


struct sembuf {
unsigned short sem_num;
short sem_op;
short sem_flg;
};


union semun {
int val;
struct semid_ds *buf;
unsigned short *array;
struct seminfo *__buf;
void *__pad;
};

struct seminfo {
int semmap;
int semmni;
int semmns;
int semmnu;
int semmsl;
int semopm;
int semume;
int semusz;
int semvmx;
int semaem;
};
# 6 "./include/linux/sem.h" 2

struct task_struct;
struct sem_undo_list;



struct sysv_sem {
struct sem_undo_list *undo_list;
};

extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
extern void exit_sem(struct task_struct *tsk);
# 16 "./include/linux/sched.h" 2
# 1 "./include/linux/shm.h" 1






# 1 "./include/uapi/linux/shm.h" 1






# 1 "./include/uapi/asm-generic/hugetlb_encode.h" 1
# 8 "./include/uapi/linux/shm.h" 2
# 28 "./include/uapi/linux/shm.h"
struct shmid_ds {
struct ipc_perm shm_perm;
int shm_segsz;
__kernel_old_time_t shm_atime;
__kernel_old_time_t shm_dtime;
__kernel_old_time_t shm_ctime;
__kernel_ipc_pid_t shm_cpid;
__kernel_ipc_pid_t shm_lpid;
unsigned short shm_nattch;
unsigned short shm_unused;
void *shm_unused2;
void *shm_unused3;
};



# 1 "./arch/riscv/include/generated/uapi/asm/shmbuf.h" 1
# 1 "./include/uapi/asm-generic/shmbuf.h" 1





# 1 "./arch/riscv/include/generated/uapi/asm/ipcbuf.h" 1
# 7 "./include/uapi/asm-generic/shmbuf.h" 2
# 1 "./arch/riscv/include/generated/uapi/asm/posix_types.h" 1
# 8 "./include/uapi/asm-generic/shmbuf.h" 2
# 27 "./include/uapi/asm-generic/shmbuf.h"
struct shmid64_ds {
struct ipc64_perm shm_perm;
__kernel_size_t shm_segsz;

long shm_atime;
long shm_dtime;
long shm_ctime;
# 42 "./include/uapi/asm-generic/shmbuf.h"
__kernel_pid_t shm_cpid;
__kernel_pid_t shm_lpid;
unsigned long shm_nattch;
unsigned long __unused4;
unsigned long __unused5;
};

struct shminfo64 {
unsigned long shmmax;
unsigned long shmmin;
unsigned long shmmni;
unsigned long shmseg;
unsigned long shmall;
unsigned long __unused1;
unsigned long __unused2;
unsigned long __unused3;
unsigned long __unused4;
};
# 2 "./arch/riscv/include/generated/uapi/asm/shmbuf.h" 2
# 44 "./include/uapi/linux/shm.h" 2
# 93 "./include/uapi/linux/shm.h"
struct shminfo {
int shmmax;
int shmmin;
int shmmni;
int shmseg;
int shmall;
};

struct shm_info {
int used_ids;
__kernel_ulong_t shm_tot;
__kernel_ulong_t shm_rss;
__kernel_ulong_t shm_swp;
__kernel_ulong_t swap_attempts;
__kernel_ulong_t swap_successes;
};
# 8 "./include/linux/shm.h" 2
# 1 "./arch/riscv/include/generated/asm/shmparam.h" 1
# 1 "./include/asm-generic/shmparam.h" 1
# 2 "./arch/riscv/include/generated/asm/shmparam.h" 2
# 9 "./include/linux/shm.h" 2

struct file;


struct sysv_shm {
struct list_head shm_clist;
};

long do_shmat(int shmid, char *shmaddr, int shmflg, unsigned long *addr,
unsigned long shmlba);
bool is_file_shm_hugepages(struct file *file);
void exit_shm(struct task_struct *task);
# 17 "./include/linux/sched.h" 2

# 1 "./include/linux/plist.h" 1
# 82 "./include/linux/plist.h"
struct plist_head {
struct list_head node_list;
};

struct plist_node {
int prio;
struct list_head prio_list;
struct list_head node_list;
};
# 124 "./include/linux/plist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
plist_head_init(struct plist_head *head)
{
INIT_LIST_HEAD(&head->node_list);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void plist_node_init(struct plist_node *node, int prio)
{
node->prio = prio;
INIT_LIST_HEAD(&node->prio_list);
INIT_LIST_HEAD(&node->node_list);
}

extern void plist_add(struct plist_node *node, struct plist_head *head);
extern void plist_del(struct plist_node *node, struct plist_head *head);

extern void plist_requeue(struct plist_node *node, struct plist_head *head);
# 213 "./include/linux/plist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int plist_head_empty(const struct plist_head *head)
{
return list_empty(&head->node_list);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int plist_node_empty(const struct plist_node *node)
{
return list_empty(&node->node_list);
}
# 283 "./include/linux/plist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct plist_node *plist_first(const struct plist_head *head)
{
return ({ void *__mptr = (void *)(head->node_list.next); _Static_assert(__builtin_types_compatible_p(typeof(*(head->node_list.next)), typeof(((struct plist_node *)0)->node_list)) || __builtin_types_compatible_p(typeof(*(head->node_list.next)), typeof(void)), "pointer type mismatch in container_of()"); ((struct plist_node *)(__mptr - __builtin_offsetof(struct plist_node, node_list))); });

}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct plist_node *plist_last(const struct plist_head *head)
{
return ({ void *__mptr = (void *)(head->node_list.prev); _Static_assert(__builtin_types_compatible_p(typeof(*(head->node_list.prev)), typeof(((struct plist_node *)0)->node_list)) || __builtin_types_compatible_p(typeof(*(head->node_list.prev)), typeof(void)), "pointer type mismatch in container_of()"); ((struct plist_node *)(__mptr - __builtin_offsetof(struct plist_node, node_list))); });

}
# 19 "./include/linux/sched.h" 2
# 1 "./include/linux/hrtimer.h" 1
# 15 "./include/linux/hrtimer.h"
# 1 "./include/linux/hrtimer_defs.h" 1
# 16 "./include/linux/hrtimer.h" 2






# 1 "./include/linux/timerqueue.h" 1








struct timerqueue_node {
struct rb_node node;
ktime_t expires;
};

struct timerqueue_head {
struct rb_root_cached rb_root;
};


extern bool timerqueue_add(struct timerqueue_head *head,
struct timerqueue_node *node);
extern bool timerqueue_del(struct timerqueue_head *head,
struct timerqueue_node *node);
extern struct timerqueue_node *timerqueue_iterate_next(
struct timerqueue_node *node);
# 33 "./include/linux/timerqueue.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head)
{
struct rb_node *leftmost = (&head->rb_root)->rb_leftmost;

return ({ void *__mptr = (void *)(leftmost); _Static_assert(__builtin_types_compatible_p(typeof(*(leftmost)), typeof(((struct timerqueue_node *)0)->node)) || __builtin_types_compatible_p(typeof(*(leftmost)), typeof(void)), "pointer type mismatch in container_of()"); ((struct timerqueue_node *)(__mptr - __builtin_offsetof(struct timerqueue_node, node))); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void timerqueue_init(struct timerqueue_node *node)
{
((&node->node)->__rb_parent_color = (unsigned long)(&node->node));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool timerqueue_node_queued(struct timerqueue_node *node)
{
return !((&node->node)->__rb_parent_color == (unsigned long)(&node->node));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool timerqueue_node_expires(struct timerqueue_node *node)
{
return node->expires;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void timerqueue_init_head(struct timerqueue_head *head)
{
head->rb_root = (struct rb_root_cached) { {((void *)0), }, ((void *)0) };
}
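/*
 * Illustrative sketch: nodes are ordered on ->expires and the head
 * caches the leftmost (earliest) rb node, so timerqueue_getnext() is
 * O(1). __editor_example_enqueue() is a hypothetical helper added for
 * illustration; the head is assumed to be already initialized:
 */
static inline __attribute__((__unused__)) struct timerqueue_node *__editor_example_enqueue(struct timerqueue_head *head, struct timerqueue_node *node, ktime_t expires)
{
timerqueue_init(node);
node->expires = expires;
timerqueue_add(head, node); /* returns true if node is the new earliest */
return timerqueue_getnext(head); /* earliest-expiring node */
}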
# 23 "./include/linux/hrtimer.h" 2

struct hrtimer_clock_base;
struct hrtimer_cpu_base;
# 39 "./include/linux/hrtimer.h"
enum hrtimer_mode {
HRTIMER_MODE_ABS = 0x00,
HRTIMER_MODE_REL = 0x01,
HRTIMER_MODE_PINNED = 0x02,
HRTIMER_MODE_SOFT = 0x04,
HRTIMER_MODE_HARD = 0x08,

HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,

HRTIMER_MODE_ABS_SOFT = HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT,
HRTIMER_MODE_REL_SOFT = HRTIMER_MODE_REL | HRTIMER_MODE_SOFT,

HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,

HRTIMER_MODE_ABS_HARD = HRTIMER_MODE_ABS | HRTIMER_MODE_HARD,
HRTIMER_MODE_REL_HARD = HRTIMER_MODE_REL | HRTIMER_MODE_HARD,

HRTIMER_MODE_ABS_PINNED_HARD = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_HARD,
HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD,
};
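/*
 * Note: the mode values above are independent flag bits (bit 0 selects
 * relative vs. absolute expiry, bit 1 pins the timer to a CPU, bits 2
 * and 3 select soft/hard irq expiry context), so the combined
 * enumerators are plain bitwise ORs of the base flags.
 */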




enum hrtimer_restart {
HRTIMER_NORESTART,
HRTIMER_RESTART,
};
# 118 "./include/linux/hrtimer.h"
struct hrtimer {
struct timerqueue_node node;
ktime_t _softexpires;
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
u8 state;
u8 is_rel;
u8 is_soft;
u8 is_hard;
};
# 136 "./include/linux/hrtimer.h"
struct hrtimer_sleeper {
struct hrtimer timer;
struct task_struct *task;
};
# 159 "./include/linux/hrtimer.h"
struct hrtimer_clock_base {
struct hrtimer_cpu_base *cpu_base;
unsigned int index;
clockid_t clockid;
seqcount_raw_spinlock_t seq;
struct hrtimer *running;
struct timerqueue_head active;
ktime_t (*get_time)(void);
ktime_t offset;
} __attribute__((__aligned__((1 << 6))));

enum hrtimer_base_type {
HRTIMER_BASE_MONOTONIC,
HRTIMER_BASE_REALTIME,
HRTIMER_BASE_BOOTTIME,
HRTIMER_BASE_TAI,
HRTIMER_BASE_MONOTONIC_SOFT,
HRTIMER_BASE_REALTIME_SOFT,
HRTIMER_BASE_BOOTTIME_SOFT,
HRTIMER_BASE_TAI_SOFT,
HRTIMER_MAX_CLOCK_BASES,
};
# 214 "./include/linux/hrtimer.h"
struct hrtimer_cpu_base {
raw_spinlock_t lock;
unsigned int cpu;
unsigned int active_bases;
unsigned int clock_was_set_seq;
unsigned int hres_active : 1,
in_hrtirq : 1,
hang_detected : 1,
softirq_activated : 1;

unsigned int nr_events;
unsigned short nr_retries;
unsigned short nr_hangs;
unsigned int max_hang_time;





ktime_t expires_next;
struct hrtimer *next_timer;
ktime_t softirq_expires_next;
struct hrtimer *softirq_next_timer;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
} __attribute__((__aligned__((1 << 6))));

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
timer->node.expires = time;
timer->_softexpires = time;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
{
timer->_softexpires = time;
timer->node.expires = ktime_add_safe(time, delta);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta)
{
timer->_softexpires = time;
timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
{
timer->node.expires = tv64;
timer->_softexpires = tv64;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
{
timer->node.expires = ktime_add_safe(timer->node.expires, time);
timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
{
timer->node.expires = ((timer->node.expires) + (ns));
timer->_softexpires = ((timer->_softexpires) + (ns));
}
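/*
 * Illustrative sketch: _softexpires records the earliest acceptable
 * expiry and node.expires the hard deadline, so a slack window is the
 * range [when, when + slack]. __editor_example_set_slack() is a
 * hypothetical helper added for illustration:
 */
static inline __attribute__((__unused__)) void __editor_example_set_slack(struct hrtimer *timer, ktime_t when, u64 slack_ns)
{
hrtimer_set_expires_range_ns(timer, when, slack_ns);
/* now timer->_softexpires == when and
 * timer->node.expires == when + slack_ns (saturated via ktime_add_safe()) */
}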

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t hrtimer_get_expires(const struct hrtimer *timer)
{
return timer->node.expires;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
{
return timer->_softexpires;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
{
return timer->node.expires;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
{
return timer->_softexpires;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
{
return ktime_to_ns(timer->node.expires);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
{
return ((timer->node.expires) - (timer->base->get_time()));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
{
return timer->base->get_time();
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int hrtimer_is_hres_active(struct hrtimer *timer)
{
return 1 ?
timer->base->cpu_base->hres_active : 0;
}


struct clock_event_device;

extern void hrtimer_interrupt(struct clock_event_device *dev);

extern unsigned int hrtimer_resolution;







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t
__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
{
ktime_t rem = ((timer->node.expires) - (now));





if (0 && timer->is_rel)
rem -= hrtimer_resolution;
return rem;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t
hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
{
return __hrtimer_expires_remaining_adjusted(timer,
timer->base->get_time());
}


extern void timerfd_clock_was_set(void);
extern void timerfd_resume(void);





extern __attribute__((section(".data..percpu" ""))) __typeof__(struct tick_device) tick_cpu_device;




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hrtimer_cancel_wait_running(struct hrtimer *timer)
{
cpu_relax();
}





extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
enum hrtimer_mode mode);
extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
enum hrtimer_mode mode);
# 386 "./include/linux/hrtimer.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hrtimer_init_on_stack(struct hrtimer *timer,
clockid_t which_clock,
enum hrtimer_mode mode)
{
hrtimer_init(timer, which_clock, mode);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
clockid_t clock_id,
enum hrtimer_mode mode)
{
hrtimer_init_sleeper(sl, clock_id, mode);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void destroy_hrtimer_on_stack(struct hrtimer *timer) { }



extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
u64 range_ns, const enum hrtimer_mode mode);
# 415 "./include/linux/hrtimer.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hrtimer_start(struct hrtimer *timer, ktime_t tim,
const enum hrtimer_mode mode)
{
hrtimer_start_range_ns(timer, tim, 0, mode);
}

extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hrtimer_start_expires(struct hrtimer *timer,
enum hrtimer_mode mode)
{
u64 delta;
ktime_t soft, hard;
soft = hrtimer_get_softexpires(timer);
hard = hrtimer_get_expires(timer);
delta = ktime_to_ns(((hard) - (soft)));
hrtimer_start_range_ns(timer, soft, delta, mode);
}
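/*
 * Note: hrtimer_start_expires() recovers the slack window recorded by
 * the hrtimer_set_expires_range*() helpers above: it restarts the timer
 * with soft = _softexpires and delta = hard - soft, preserving the
 * original [soft, hard] expiry range.
 */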

void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
enum hrtimer_mode mode);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hrtimer_restart(struct hrtimer *timer)
{
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}


extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
return __hrtimer_get_remaining(timer, false);
}

extern u64 hrtimer_get_next_event(void);
extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);

extern bool hrtimer_active(const struct hrtimer *timer);
# 468 "./include/linux/hrtimer.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool hrtimer_is_queued(struct hrtimer *timer)
{

return !!(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_170(void) ; if (!((sizeof(timer->state) == sizeof(char) || sizeof(timer->state) == sizeof(short) || sizeof(timer->state) == sizeof(int) || sizeof(timer->state) == sizeof(long)) || sizeof(timer->state) == sizeof(long long))) __compiletime_assert_170(); } while (0); (*(const volatile typeof( _Generic((timer->state), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (timer->state))) *)&(timer->state)); }) & 0x01);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int hrtimer_callback_running(struct hrtimer *timer)
{
return timer->base->running == timer;
}


extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
# 503 "./include/linux/hrtimer.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 hrtimer_forward_now(struct hrtimer *timer,
ktime_t interval)
{
return hrtimer_forward(timer, timer->base->get_time(), interval);
}



extern int nanosleep_copyout(struct restart_block *, struct timespec64 *);
extern long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
const clockid_t clockid);

extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
const enum hrtimer_mode mode);
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
u64 delta,
const enum hrtimer_mode mode,
clockid_t clock_id);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);


extern void hrtimer_run_queues(void);


extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) hrtimers_init(void);


extern void sysrq_timer_list_show(void);

int hrtimers_prepare_cpu(unsigned int cpu);

int hrtimers_dead_cpu(unsigned int cpu);
# 20 "./include/linux/sched.h" 2

# 1 "./include/linux/seccomp.h" 1




# 1 "./include/uapi/linux/seccomp.h" 1
# 60 "./include/uapi/linux/seccomp.h"
struct seccomp_data {
int nr;
__u32 arch;
__u64 instruction_pointer;
__u64 args[6];
};

struct seccomp_notif_sizes {
__u16 seccomp_notif;
__u16 seccomp_notif_resp;
__u16 seccomp_data;
};

struct seccomp_notif {
__u64 id;
__u32 pid;
__u32 flags;
struct seccomp_data data;
};
# 109 "./include/uapi/linux/seccomp.h"
struct seccomp_notif_resp {
__u64 id;
__s64 val;
__s32 error;
__u32 flags;
};
# 128 "./include/uapi/linux/seccomp.h"
struct seccomp_notif_addfd {
__u64 id;
__u32 flags;
__u32 srcfd;
__u32 newfd;
__u32 newfd_flags;
};
# 6 "./include/linux/seccomp.h" 2
# 65 "./include/linux/seccomp.h"
struct seccomp { };
struct seccomp_filter { };
struct seccomp_data;


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int secure_computing(void) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __secure_computing(const struct seccomp_data *sd) { return 0; }




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long prctl_get_seccomp(void)
{
return -22;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long prctl_set_seccomp(unsigned long arg2, char *arg3)
{
return -22;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int seccomp_mode(struct seccomp *s)
{
return 0;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void seccomp_filter_release(struct task_struct *tsk)
{
return;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void get_seccomp_filter(struct task_struct *tsk)
{
return;
}
# 112 "./include/linux/seccomp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long seccomp_get_filter(struct task_struct *task,
unsigned long n, void *data)
{
return -22;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long seccomp_get_metadata(struct task_struct *task,
unsigned long filter_off,
void *data)
{
return -22;
}
# 22 "./include/linux/sched.h" 2



# 1 "./include/linux/resource.h" 1




# 1 "./include/uapi/linux/resource.h" 1
# 24 "./include/uapi/linux/resource.h"
struct rusage {
struct __kernel_old_timeval ru_utime;
struct __kernel_old_timeval ru_stime;
__kernel_long_t ru_maxrss;
__kernel_long_t ru_ixrss;
__kernel_long_t ru_idrss;
__kernel_long_t ru_isrss;
__kernel_long_t ru_minflt;
__kernel_long_t ru_majflt;
__kernel_long_t ru_nswap;
__kernel_long_t ru_inblock;
__kernel_long_t ru_oublock;
__kernel_long_t ru_msgsnd;
__kernel_long_t ru_msgrcv;
__kernel_long_t ru_nsignals;
__kernel_long_t ru_nvcsw;
__kernel_long_t ru_nivcsw;
};

struct rlimit {
__kernel_ulong_t rlim_cur;
__kernel_ulong_t rlim_max;
};



struct rlimit64 {
__u64 rlim_cur;
__u64 rlim_max;
};
# 85 "./include/uapi/linux/resource.h"
# 1 "./arch/riscv/include/generated/uapi/asm/resource.h" 1
# 1 "./include/asm-generic/resource.h" 1




# 1 "./include/uapi/asm-generic/resource.h" 1
# 6 "./include/asm-generic/resource.h" 2
# 2 "./arch/riscv/include/generated/uapi/asm/resource.h" 2
# 86 "./include/uapi/linux/resource.h" 2
# 6 "./include/linux/resource.h" 2


struct task_struct;

void getrusage(struct task_struct *p, int who, struct rusage *ru);
# 26 "./include/linux/sched.h" 2
# 1 "./include/linux/latencytop.h" 1
# 14 "./include/linux/latencytop.h"
struct task_struct;
# 46 "./include/linux/latencytop.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
account_scheduler_latency(struct task_struct *task, int usecs, int inter)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_tsk_latency_tracing(struct task_struct *p)
{
}
# 27 "./include/linux/sched.h" 2
# 1 "./include/linux/sched/prio.h" 1
# 32 "./include/linux/sched/prio.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long nice_to_rlimit(long nice)
{
return (19 - nice + 1);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long rlimit_to_nice(long prio)
{
return (19 - prio + 1);
}
# 28 "./include/linux/sched.h" 2
# 1 "./include/linux/sched/types.h" 1
# 17 "./include/linux/sched/types.h"
struct task_cputime {
u64 stime;
u64 utime;
unsigned long long sum_exec_runtime;
};
# 29 "./include/linux/sched.h" 2

# 1 "./include/linux/syscall_user_dispatch.h" 1
# 26 "./include/linux/syscall_user_dispatch.h"
struct syscall_user_dispatch {};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int set_syscall_user_dispatch(unsigned long mode, unsigned long offset,
unsigned long len, char *selector)
{
return -22;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_syscall_work_syscall_user_dispatch(struct task_struct *tsk)
{
}
# 31 "./include/linux/sched.h" 2

# 1 "./include/linux/task_io_accounting.h" 1
# 12 "./include/linux/task_io_accounting.h"
struct task_io_accounting {
# 46 "./include/linux/task_io_accounting.h"
};
# 33 "./include/linux/sched.h" 2
# 1 "./include/linux/posix-timers.h" 1






# 1 "./include/linux/alarmtimer.h" 1








struct rtc_device;

enum alarmtimer_type {
ALARM_REALTIME,
ALARM_BOOTTIME,


ALARM_NUMTYPE,


ALARM_REALTIME_FREEZER,
ALARM_BOOTTIME_FREEZER,
};

enum alarmtimer_restart {
ALARMTIMER_NORESTART,
ALARMTIMER_RESTART,
};
# 42 "./include/linux/alarmtimer.h"
struct alarm {
struct timerqueue_node node;
struct hrtimer timer;
enum alarmtimer_restart (*function)(struct alarm *, ktime_t now);
enum alarmtimer_type type;
int state;
void *data;
};

void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
enum alarmtimer_restart (*function)(struct alarm *, ktime_t));
void alarm_start(struct alarm *alarm, ktime_t start);
void alarm_start_relative(struct alarm *alarm, ktime_t start);
void alarm_restart(struct alarm *alarm);
int alarm_try_to_cancel(struct alarm *alarm);
int alarm_cancel(struct alarm *alarm);

u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval);
u64 alarm_forward_now(struct alarm *alarm, ktime_t interval);
ktime_t alarm_expires_remaining(const struct alarm *alarm);



struct rtc_device *alarmtimer_get_rtcdev(void);
# 8 "./include/linux/posix-timers.h" 2


struct kernel_siginfo;
struct task_struct;
# 38 "./include/linux/posix-timers.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) clockid_t make_process_cpuclock(const unsigned int pid,
const clockid_t clock)
{
return ((~pid) << 3) | clock;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) clockid_t make_thread_cpuclock(const unsigned int tid,
const clockid_t clock)
{
return make_process_cpuclock(tid, clock | 4);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) clockid_t fd_to_clockid(const int fd)
{
return make_process_cpuclock((unsigned int) fd, 3);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int clockid_to_fd(const clockid_t clk)
{
return ~(clk >> 3);
}
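/*
 * Illustrative sketch: fd_to_clockid() and clockid_to_fd() are inverses.
 * fd_to_clockid(fd) yields ((~fd) << 3) | 3 (the 3 being CLOCKFD), and
 * an arithmetic right shift by 3 followed by ~ recovers fd.
 * __editor_example_clockid_roundtrip() is a hypothetical helper added
 * for illustration:
 */
static inline __attribute__((__unused__)) int __editor_example_clockid_roundtrip(int fd)
{
return clockid_to_fd(fd_to_clockid(fd)) == fd; /* 1 for ordinary fd values */
}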
# 69 "./include/linux/posix-timers.h"
struct cpu_timer {
struct timerqueue_node node;
struct timerqueue_head *head;
struct pid *pid;
struct list_head elist;
int firing;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cpu_timer_enqueue(struct timerqueue_head *head,
struct cpu_timer *ctmr)
{
ctmr->head = head;
return timerqueue_add(head, &ctmr->node);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cpu_timer_queued(struct cpu_timer *ctmr)
{
return !!ctmr->head;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cpu_timer_dequeue(struct cpu_timer *ctmr)
{
if (cpu_timer_queued(ctmr)) {
timerqueue_del(ctmr->head, &ctmr->node);
ctmr->head = ((void *)0);
return true;
}
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 cpu_timer_getexpires(struct cpu_timer *ctmr)
{
return ctmr->node.expires;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpu_timer_setexpires(struct cpu_timer *ctmr, u64 exp)
{
ctmr->node.expires = exp;
}
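/*
 * Illustrative sketch: ->head doubles as the "queued" flag, which makes
 * cpu_timer_dequeue() idempotent. __editor_example_requeue() is a
 * hypothetical helper added for illustration:
 */
static inline __attribute__((__unused__)) bool __editor_example_requeue(struct timerqueue_head *head, struct cpu_timer *ctmr, u64 expires)
{
cpu_timer_dequeue(ctmr); /* no-op if the timer is not queued */
cpu_timer_setexpires(ctmr, expires);
return cpu_timer_enqueue(head, ctmr); /* true if it is the new earliest */
}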






struct posix_cputimer_base {
u64 nextevt;
struct timerqueue_head tqhead;
};
# 129 "./include/linux/posix-timers.h"
struct posix_cputimers {
struct posix_cputimer_base bases[3];
unsigned int timers_active;
unsigned int expiry_active;
};






struct posix_cputimers_work {
struct callback_head work;
unsigned int scheduled;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void posix_cputimers_init(struct posix_cputimers *pct)
{
memset(pct, 0, sizeof(*pct));
pct->bases[0].nextevt = ((u64)~0ULL);
pct->bases[1].nextevt = ((u64)~0ULL);
pct->bases[2].nextevt = ((u64)~0ULL);
}
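/*
 * Note: ((u64)~0ULL) (i.e. U64_MAX) serves as the "no timer armed"
 * sentinel for ->nextevt; posix_cputimers_rt_watchdog() below then
 * lowers the sched-clock base's next event to the RT watchdog runtime.
 */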

void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void posix_cputimers_rt_watchdog(struct posix_cputimers *pct,
u64 runtime)
{
pct->bases[2].nextevt = runtime;
}
# 189 "./include/linux/posix-timers.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_posix_cputimers_work(struct task_struct *p) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void posix_cputimers_init_work(void) { }
# 218 "./include/linux/posix-timers.h"
struct k_itimer {
struct list_head list;
struct hlist_node t_hash;
spinlock_t it_lock;
const struct k_clock *kclock;
clockid_t it_clock;
timer_t it_id;
int it_active;
s64 it_overrun;
s64 it_overrun_last;
int it_requeue_pending;
int it_sigev_notify;
ktime_t it_interval;
struct signal_struct *it_signal;
union {
struct pid *it_pid;
struct task_struct *it_process;
};
struct sigqueue *sigq;
union {
struct {
struct hrtimer timer;
} real;
struct cpu_timer cpu;
struct {
struct alarm alarmtimer;
} alarm;
} it;
struct callback_head rcu;
};

void run_posix_cpu_timers(void);
void posix_cpu_timers_exit(struct task_struct *task);
void posix_cpu_timers_exit_group(struct task_struct *task);
void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
u64 *newval, u64 *oldval);

int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);

void posixtimer_rearm(struct kernel_siginfo *info);
# 34 "./include/linux/sched.h" 2
# 1 "./include/uapi/linux/rseq.h" 1
# 16 "./include/uapi/linux/rseq.h"
enum rseq_cpu_id_state {
RSEQ_CPU_ID_UNINITIALIZED = -1,
RSEQ_CPU_ID_REGISTRATION_FAILED = -2,
};

enum rseq_flags {
RSEQ_FLAG_UNREGISTER = (1 << 0),
};

enum rseq_cs_flags_bit {
RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0,
RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1,
RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2,
};

enum rseq_cs_flags {
RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT =
(1U << RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT),
RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL =
(1U << RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT),
RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE =
(1U << RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT),
};






struct rseq_cs {

__u32 version;

__u32 flags;
__u64 start_ip;

__u64 post_commit_offset;
__u64 abort_ip;
} __attribute__((aligned(4 * sizeof(__u64))));







struct rseq {
# 75 "./include/uapi/linux/rseq.h"
__u32 cpu_id_start;
# 90 "./include/uapi/linux/rseq.h"
__u32 cpu_id;
# 112 "./include/uapi/linux/rseq.h"
__u64 rseq_cs;
# 132 "./include/uapi/linux/rseq.h"
__u32 flags;
} __attribute__((aligned(4 * sizeof(__u64))));
# 35 "./include/linux/sched.h" 2

# 1 "./include/linux/kcsan.h" 1
# 71 "./include/linux/kcsan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kcsan_init(void) { }
# 37 "./include/linux/sched.h" 2
# 1 "./arch/riscv/include/generated/asm/kmap_size.h" 1
# 1 "./include/asm-generic/kmap_size.h" 1
# 2 "./arch/riscv/include/generated/asm/kmap_size.h" 2
# 38 "./include/linux/sched.h" 2


struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct bpf_local_storage;
struct bpf_run_ctx;
struct capture_control;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct io_uring_task;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
# 281 "./include/linux/sched.h"
enum {
TASK_COMM_LEN = 16,
};

extern void scheduler_tick(void);



extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
void schedule(void);
extern void schedule_preempt_disabled(void);
void preempt_schedule_irq(void);




extern int __attribute__((__warn_unused_result__)) io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
# 315 "./include/linux/sched.h"
struct prev_cputime {

u64 utime;
u64 stime;
raw_spinlock_t lock;

};

enum vtime_state {

VTIME_INACTIVE = 0,

VTIME_IDLE,

VTIME_SYS,

VTIME_USER,

VTIME_GUEST,
};

struct vtime {
seqcount_t seqcount;
unsigned long long starttime;
enum vtime_state state;
unsigned int cpu;
u64 utime;
u64 stime;
u64 gtime;
};







enum uclamp_id {
UCLAMP_MIN = 0,
UCLAMP_MAX,
UCLAMP_CNT
};


extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;


struct sched_info {




unsigned long pcount;


unsigned long long run_delay;




unsigned long long last_arrival;


unsigned long long last_queued;


};
# 398 "./include/linux/sched.h"
struct load_weight {
unsigned long weight;
u32 inv_weight;
};
# 432 "./include/linux/sched.h"
struct util_est {
unsigned int enqueued;
unsigned int ewma;


} __attribute__((__aligned__(sizeof(u64))));
# 484 "./include/linux/sched.h"
struct sched_avg {
u64 last_update_time;
u64 load_sum;
u64 runnable_sum;
u32 util_sum;
u32 period_contrib;
unsigned long load_avg;
unsigned long runnable_avg;
unsigned long util_avg;
struct util_est util_est;
} __attribute__((__aligned__((1 << 6))));

struct sched_statistics {

u64 wait_start;
u64 wait_max;
u64 wait_count;
u64 wait_sum;
u64 iowait_count;
u64 iowait_sum;

u64 sleep_start;
u64 sleep_max;
s64 sum_sleep_runtime;

u64 block_start;
u64 block_max;
s64 sum_block_runtime;

u64 exec_max;
u64 slice_max;

u64 nr_migrations_cold;
u64 nr_failed_migrations_affine;
u64 nr_failed_migrations_running;
u64 nr_failed_migrations_hot;
u64 nr_forced_migrations;

u64 nr_wakeups;
u64 nr_wakeups_sync;
u64 nr_wakeups_migrate;
u64 nr_wakeups_local;
u64 nr_wakeups_remote;
u64 nr_wakeups_affine;
u64 nr_wakeups_affine_attempts;
u64 nr_wakeups_passive;
u64 nr_wakeups_idle;





} __attribute__((__aligned__((1 << 6))));

struct sched_entity {

struct load_weight load;
struct rb_node run_node;
struct list_head group_node;
unsigned int on_rq;

u64 exec_start;
u64 sum_exec_runtime;
u64 vruntime;
u64 prev_sum_exec_runtime;

u64 nr_migrations;


int depth;
struct sched_entity *parent;

struct cfs_rq *cfs_rq;

struct cfs_rq *my_q;

unsigned long runnable_weight;
# 570 "./include/linux/sched.h"
struct sched_avg avg;

};

struct sched_rt_entity {
struct list_head run_list;
unsigned long timeout;
unsigned long watchdog_stamp;
unsigned int time_slice;
unsigned short on_rq;
unsigned short on_list;

struct sched_rt_entity *back;







};

struct sched_dl_entity {
struct rb_node rb_node;






u64 dl_runtime;
u64 dl_deadline;
u64 dl_period;
u64 dl_bw;
u64 dl_density;






s64 runtime;
u64 deadline;
unsigned int flags;
# 635 "./include/linux/sched.h"
unsigned int dl_throttled : 1;
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
unsigned int dl_overrun : 1;





struct hrtimer dl_timer;
# 653 "./include/linux/sched.h"
struct hrtimer inactive_timer;







struct sched_dl_entity *pi_se;

};
# 700 "./include/linux/sched.h"
union rcu_special {
struct {
u8 blocked;
u8 need_qs;
u8 exp_hint;
u8 need_mb;
} b;
u32 s;
};

enum perf_event_task_context {
perf_invalid_context = -1,
perf_hw_context = 0,
perf_sw_context,
perf_nr_task_contexts,
};

struct wake_q_node {
struct wake_q_node *next;
};

struct kmap_ctrl {




};

struct task_struct {





struct thread_info thread_info;

unsigned int __state;
# 749 "./include/linux/sched.h"
void *stack;
refcount_t usage;

unsigned int flags;
unsigned int ptrace;


int on_cpu;
struct __call_single_node wake_entry;
unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
# 769 "./include/linux/sched.h"
int recent_used_cpu;
int wake_cpu;

int on_rq;

int prio;
int static_prio;
int normal_prio;
unsigned int rt_priority;

struct sched_entity se;
struct sched_rt_entity rt;
struct sched_dl_entity dl;
const struct sched_class *sched_class;
# 791 "./include/linux/sched.h"
struct task_group *sched_task_group;
# 807 "./include/linux/sched.h"
struct sched_statistics stats;



struct hlist_head preempt_notifiers;



unsigned int btrace_seq;


unsigned int policy;
int nr_cpus_allowed;
const cpumask_t *cpus_ptr;
cpumask_t *user_cpus_ptr;
cpumask_t cpus_mask;
void *migration_pending;

unsigned short migration_disabled;

unsigned short migration_flags;
# 845 "./include/linux/sched.h"
int trc_reader_nesting;
int trc_ipi_to_cpu;
union rcu_special trc_reader_special;
bool trc_reader_checked;
struct list_head trc_holdout_list;


struct sched_info sched_info;

struct list_head tasks;

struct plist_node pushable_tasks;
struct rb_node pushable_dl_tasks;


struct mm_struct *mm;
struct mm_struct *active_mm;


struct vmacache vmacache;


struct task_rss_stat rss_stat;

int exit_state;
int exit_code;
int exit_signal;

int pdeath_signal;

unsigned long jobctl;


unsigned int personality;


unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
unsigned sched_migrated:1;





unsigned :0;
# 906 "./include/linux/sched.h"
unsigned sched_remote_wakeup:1;


unsigned in_execve:1;
unsigned in_iowait:1;







unsigned brk_randomized:1;



unsigned no_cgroup_migration:1;

unsigned frozen:1;
# 939 "./include/linux/sched.h"
unsigned in_eventfd_signal:1;





unsigned long atomic_flags;

struct restart_block restart_block;

pid_t pid;
pid_t tgid;



unsigned long stack_canary;
# 963 "./include/linux/sched.h"
struct task_struct *real_parent;


struct task_struct *parent;




struct list_head children;
struct list_head sibling;
struct task_struct *group_leader;







struct list_head ptraced;
struct list_head ptrace_entry;


struct pid *thread_pid;
struct hlist_node pid_links[PIDTYPE_MAX];
struct list_head thread_group;
struct list_head thread_node;

struct completion *vfork_done;


int *set_child_tid;


int *clear_child_tid;


void *worker_private;

u64 utime;
u64 stime;




u64 gtime;
struct prev_cputime prev_cputime;
# 1017 "./include/linux/sched.h"
unsigned long nvcsw;
unsigned long nivcsw;


u64 start_time;


u64 start_boottime;


unsigned long min_flt;
unsigned long maj_flt;


struct posix_cputimers posix_cputimers;
# 1040 "./include/linux/sched.h"
const struct cred *ptracer_cred;


const struct cred *real_cred;


const struct cred *cred;



struct key *cached_requested_key;
# 1060 "./include/linux/sched.h"
char comm[TASK_COMM_LEN];

struct nameidata *nameidata;


struct sysv_sem sysvsem;
struct sysv_shm sysvshm;


unsigned long last_switch_count;
unsigned long last_switch_time;


struct fs_struct *fs;


struct files_struct *files;


struct io_uring_task *io_uring;



struct nsproxy *nsproxy;


struct signal_struct *signal;
struct sighand_struct *sighand;
sigset_t blocked;
sigset_t real_blocked;

sigset_t saved_sigmask;
struct sigpending pending;
unsigned long sas_ss_sp;
size_t sas_ss_size;
unsigned int sas_ss_flags;

struct callback_head *task_works;
# 1106 "./include/linux/sched.h"
struct seccomp seccomp;
struct syscall_user_dispatch syscall_dispatch;


u64 parent_exec_id;
u64 self_exec_id;


spinlock_t alloc_lock;


raw_spinlock_t pi_lock;

struct wake_q_node wake_q;



struct rb_root_cached pi_waiters;

struct task_struct *pi_top_task;

struct rt_mutex_waiter *pi_blocked_on;




struct mutex_waiter *blocked_on;



int non_block_count;



struct irqtrace_events irqtrace;
unsigned int hardirq_threaded;
u64 hardirq_chain_key;
int softirqs_enabled;
int softirq_context;
int irq_config;







u64 curr_chain_key;
int lockdep_depth;
unsigned int lockdep_recursion;
struct held_lock held_locks[48UL];







void *journal_info;


struct bio_list *bio_list;


struct blk_plug *plug;


struct reclaim_state *reclaim_state;

struct backing_dev_info *backing_dev_info;

struct io_context *io_context;


struct capture_control *capture_control;


unsigned long ptrace_message;
kernel_siginfo_t *last_siginfo;

struct task_io_accounting ioac;
# 1209 "./include/linux/sched.h"
struct css_set *cgroups;

struct list_head cg_list;






struct robust_list_head *robust_list;



struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
struct mutex futex_exit_mutex;
unsigned int futex_state;


struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
struct mutex perf_event_mutex;
struct list_head perf_event_list;
# 1292 "./include/linux/sched.h"
struct rseq *rseq;
u32 rseq_sig;




unsigned long rseq_event_mask;


struct tlbflush_unmap_batch tlb_ubc;

union {
refcount_t rcu_users;
struct callback_head rcu;
};


struct pipe_inode_info *splice_pipe;

struct page_frag task_frag;
# 1325 "./include/linux/sched.h"
int nr_dirtied;
int nr_dirtied_pause;

unsigned long dirty_paused_when;
# 1338 "./include/linux/sched.h"
u64 timer_slack_ns;
u64 default_timer_slack_ns;
# 1361 "./include/linux/sched.h"
int curr_ret_stack;
int curr_ret_depth;


struct ftrace_ret_stack *ret_stack;


unsigned long long ftrace_timestamp;





atomic_t trace_overrun;


atomic_t tracing_graph_pause;




unsigned long trace;


unsigned long trace_recursion;
# 1430 "./include/linux/sched.h"
struct uprobe_task *utask;





struct kmap_ctrl kmap_ctrl;

unsigned long task_state_change;




int pagefault_disabled;

struct task_struct *oom_reaper_list;
struct timer_list oom_reaper_timer;


struct vm_struct *stack_vm_area;



refcount_t stack_refcount;
# 1464 "./include/linux/sched.h"
struct bpf_local_storage *bpf_storage;

struct bpf_run_ctx *bpf_ctx;
# 1486 "./include/linux/sched.h"
struct llist_head kretprobe_instances;
# 1509 "./include/linux/sched.h"
struct thread_struct thread;







};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct pid *task_pid(struct task_struct *task)
{
return task->thread_pid;
}
# 1535 "./include/linux/sched.h"
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pid_t task_pid_nr(struct task_struct *tsk)
{
return tsk->pid;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pid_t task_pid_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PID, ((void *)0));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pid_t task_tgid_nr(struct task_struct *tsk)
{
return tsk->tgid;
}
# 1568 "./include/linux/sched.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pid_alive(const struct task_struct *p)
{
return p->thread_pid != ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pid_t task_pgrp_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ((void *)0));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pid_t task_session_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_SID, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pid_t task_tgid_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ((void *)0));
}
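/*
 * Note on the naming convention above: task_*_nr() returns the global
 * (init-namespace) id, task_*_nr_ns() the id as seen from a given pid
 * namespace, and task_*_vnr() the id as seen from current's namespace
 * (implemented as a NULL-namespace lookup).
 */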

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
pid_t pid = 0;

rcu_read_lock();
if (pid_alive(tsk))
pid = task_tgid_nr_ns(({ typeof(*(tsk->real_parent)) *__UNIQUE_ID_rcu171 = (typeof(*(tsk->real_parent)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_172(void) ; if (!((sizeof((tsk->real_parent)) == sizeof(char) || sizeof((tsk->real_parent)) == sizeof(short) || sizeof((tsk->real_parent)) == sizeof(int) || sizeof((tsk->real_parent)) == sizeof(long)) || sizeof((tsk->real_parent)) == sizeof(long long))) __compiletime_assert_172(); } while (0); (*(const volatile typeof( _Generic(((tsk->real_parent)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((tsk->real_parent)))) *)&((tsk->real_parent))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(tsk->real_parent)) *)(__UNIQUE_ID_rcu171)); }), ns);
rcu_read_unlock();

return pid;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pid_t task_ppid_nr(const struct task_struct *tsk)
{
return task_ppid_nr_ns(tsk, &init_pid_ns);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pid_t task_pgrp_nr(struct task_struct *tsk)
{
return task_pgrp_nr_ns(tsk, &init_pid_ns);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int __task_state_index(unsigned int tsk_state,
unsigned int tsk_exit_state)
{
unsigned int state = (tsk_state | tsk_exit_state) & (0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040);

do { __attribute__((__noreturn__)) extern void __compiletime_assert_173(void) ; if (!(!(((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) == 0 || ((((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) - 1)) != 0)))) __compiletime_assert_173(); } while (0);

if (tsk_state == (0x0002 | 0x0400))
state = ((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1);






if (tsk_state == 0x1000)
state = 0x0002;

return fls(state);
}
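/*
 * Worked example: fls() returns the 1-based index of the most
 * significant set bit (fls(0) == 0), which indexes "RSDTtXZPI" below:
 * 0 -> 'R' (running), 0x1 -> 'S' (interruptible),
 * 0x2 -> 'D' (uninterruptible), and so on. The two special cases above
 * appear to be TASK_IDLE (0x0002 | 0x0400), reported as index 8 ('I'),
 * and TASK_RTLOCK_WAIT (0x1000), reported as plain uninterruptible
 * sleep.
 */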

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int task_state_index(struct task_struct *tsk)
{
return __task_state_index(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_174(void) ; if (!((sizeof(tsk->__state) == sizeof(char) || sizeof(tsk->__state) == sizeof(short) || sizeof(tsk->__state) == sizeof(int) || sizeof(tsk->__state) == sizeof(long)) || sizeof(tsk->__state) == sizeof(long long))) __compiletime_assert_174(); } while (0); (*(const volatile typeof( _Generic((tsk->__state), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (tsk->__state))) *)&(tsk->__state)); }), tsk->exit_state);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) char task_index_to_char(unsigned int state)
{
static const char state_char[] = "RSDTtXZPI";

do { __attribute__((__noreturn__)) extern void __compiletime_assert_175(void) ; if (!(!(1 + ( __builtin_constant_p((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) ? (((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) < 2 ? 0 : 63 - __builtin_clzll((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1))) : (sizeof((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) <= 4) ? __ilog2_u32((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) : __ilog2_u64((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) ) != sizeof(state_char) - 1))) __compiletime_assert_175(); } while (0);

return state_char[state];
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) char task_state_to_char(struct task_struct *tsk)
{
return task_index_to_char(task_state_index(tsk));
}
# 1679 "./include/linux/sched.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int is_global_init(struct task_struct *tsk)
{
return task_tgid_nr(tsk) == 1;
}

extern struct pid *cad_pid;
# 1746 "./include/linux/sched.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool is_percpu_thread(void)
{

return (get_current()->flags & 0x04000000) &&
(get_current()->nr_cpus_allowed == 1);



}
# 1778 "./include/linux/sched.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool task_no_new_privs(struct task_struct *p) { return arch_test_bit(0, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void task_set_no_new_privs(struct task_struct *p) { set_bit(0, &p->atomic_flags); }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool task_spread_page(struct task_struct *p) { return arch_test_bit(1, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void task_set_spread_page(struct task_struct *p) { set_bit(1, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void task_clear_spread_page(struct task_struct *p) { clear_bit(1, &p->atomic_flags); }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool task_spread_slab(struct task_struct *p) { return arch_test_bit(2, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void task_set_spread_slab(struct task_struct *p) { set_bit(2, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void task_clear_spread_slab(struct task_struct *p) { clear_bit(2, &p->atomic_flags); }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool task_spec_ssb_disable(struct task_struct *p) { return arch_test_bit(3, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void task_set_spec_ssb_disable(struct task_struct *p) { set_bit(3, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void task_clear_spec_ssb_disable(struct task_struct *p) { clear_bit(3, &p->atomic_flags); }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool task_spec_ssb_noexec(struct task_struct *p) { return arch_test_bit(7, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void task_set_spec_ssb_noexec(struct task_struct *p) { set_bit(7, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void task_clear_spec_ssb_noexec(struct task_struct *p) { clear_bit(7, &p->atomic_flags); }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool task_spec_ssb_force_disable(struct task_struct *p) { return arch_test_bit(4, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void task_set_spec_ssb_force_disable(struct task_struct *p) { set_bit(4, &p->atomic_flags); }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool task_spec_ib_disable(struct task_struct *p) { return arch_test_bit(5, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void task_set_spec_ib_disable(struct task_struct *p) { set_bit(5, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void task_clear_spec_ib_disable(struct task_struct *p) { clear_bit(5, &p->atomic_flags); }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool task_spec_ib_force_disable(struct task_struct *p) { return arch_test_bit(6, &p->atomic_flags); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void task_set_spec_ib_force_disable(struct task_struct *p) { set_bit(6, &p->atomic_flags); }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
get_current()->flags &= ~flags;
get_current()->flags |= orig_flags & flags;
}
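/* Restores only the bits selected by @flags from @orig_flags, leaving
 * every other PF_* flag of current untouched. */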

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);

extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
extern void release_user_cpus_ptr(struct task_struct *p);
extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
# 1851 "./include/linux/sched.h"
extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int task_nice(const struct task_struct *p)
{
return (((p)->static_prio) - (100 + (19 - -20 + 1) / 2));
}

extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int available_idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern void sched_set_fifo(struct task_struct *p);
extern void sched_set_fifo_low(struct task_struct *p);
extern void sched_set_normal(struct task_struct *p, int nice);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool is_idle_task(const struct task_struct *p)
{
return !!(p->flags & 0x00000002);
}

extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

void yield(void);

union thread_union {

struct task_struct task;




unsigned long stack[(((1UL) << (12)) << (2 + 0))/sizeof(long)];
};
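/* The stack size here and in init_stack below is PAGE_SIZE shifted by
 * the thread-size order, with the constants folded in:
 * (1UL << 12) << 2 = 16 KiB per task stack. */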





extern unsigned long init_stack[(((1UL) << (12)) << (2 + 0)) / sizeof(unsigned long)];
# 1928 "./include/linux/sched.h"
extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);




extern struct task_struct *find_get_task_by_vpid(pid_t nr);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);


extern void kick_process(struct task_struct *tsk);




extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_task_comm(struct task_struct *tsk, const char *from)
{
__set_task_comm(tsk, from, false);
}

extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void scheduler_ipi(void)
{





do { if (test_ti_thread_flag(((struct thread_info *)get_current()), 3)) set_preempt_need_resched(); } while (0);
}
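/* The do { ... } while (0) above appears to be the expansion of
 * preempt_fold_need_resched(): if TIF_NEED_RESCHED (flag 3) is set on
 * current, it is folded into the preempt count so the next
 * preempt_enable() triggers a reschedule. */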
extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
# 1982 "./include/linux/sched.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
set_ti_thread_flag((&(tsk)->thread_info), flag);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
clear_ti_thread_flag((&(tsk)->thread_info), flag);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void update_tsk_thread_flag(struct task_struct *tsk, int flag,
bool value)
{
update_ti_thread_flag((&(tsk)->thread_info), flag, value);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_set_ti_thread_flag((&(tsk)->thread_info), flag);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_clear_ti_thread_flag((&(tsk)->thread_info), flag);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_ti_thread_flag((&(tsk)->thread_info), flag);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_tsk_need_resched(struct task_struct *tsk)
{
set_tsk_thread_flag(tsk,3);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_tsk_need_resched(struct task_struct *tsk)
{
clear_tsk_thread_flag(tsk,3);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int test_tsk_need_resched(struct task_struct *tsk)
{
return __builtin_expect(!!(test_tsk_thread_flag(tsk,3)), 0);
}
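/* Flag 3 throughout this block is riscv's TIF_NEED_RESCHED; the
 * __builtin_expect(..., 0) wrappers are expanded unlikely(). */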
# 2035 "./include/linux/sched.h"
extern int __cond_resched(void);
# 2056 "./include/linux/sched.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int _cond_resched(void)
{
return __cond_resched();
}
# 2074 "./include/linux/sched.h"
extern int __cond_resched_lock(spinlock_t *lock);
extern int __cond_resched_rwlock_read(rwlock_t *lock);
extern int __cond_resched_rwlock_write(rwlock_t *lock);
# 2112 "./include/linux/sched.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cond_resched_rcu(void)
{

rcu_read_unlock();
({ __might_resched("include/linux/sched.h", 2116, 0); _cond_resched(); });
rcu_read_lock();

}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int spin_needbreak(spinlock_t *lock)
{



return 0;

}
# 2143 "./include/linux/sched.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int rwlock_needbreak(rwlock_t *lock)
{



return 0;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool need_resched(void)
{
return __builtin_expect(!!(test_ti_thread_flag(((struct thread_info *)get_current()), 3)), 0);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int task_cpu(const struct task_struct *p)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_176(void) ; if (!((sizeof((&(p)->thread_info)->cpu) == sizeof(char) || sizeof((&(p)->thread_info)->cpu) == sizeof(short) || sizeof((&(p)->thread_info)->cpu) == sizeof(int) || sizeof((&(p)->thread_info)->cpu) == sizeof(long)) || sizeof((&(p)->thread_info)->cpu) == sizeof(long long))) __compiletime_assert_176(); } while (0); (*(const volatile typeof( _Generic(((&(p)->thread_info)->cpu), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&(p)->thread_info)->cpu))) *)&((&(p)->thread_info)->cpu)); });
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
# 2182 "./include/linux/sched.h"
extern bool sched_task_on_rq(struct task_struct *p);
extern unsigned long get_wchan(struct task_struct *p);
# 2194 "./include/linux/sched.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool vcpu_is_preempted(int cpu)
{
return false;
}


extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool owner_on_cpu(struct task_struct *owner)
{




return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_177(void) ; if (!((sizeof(owner->on_cpu) == sizeof(char) || sizeof(owner->on_cpu) == sizeof(short) || sizeof(owner->on_cpu) == sizeof(int) || sizeof(owner->on_cpu) == sizeof(long)) || sizeof(owner->on_cpu) == sizeof(long long))) __compiletime_assert_177(); } while (0); (*(const volatile typeof( _Generic((owner->on_cpu), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (owner->on_cpu))) *)&(owner->on_cpu)); }) && !vcpu_is_preempted(task_cpu(owner));
}


unsigned long sched_cpu_util(int cpu, unsigned long max);
# 2227 "./include/linux/sched.h"
enum rseq_event_mask_bits {
RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
};

enum rseq_event_mask {
RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rseq_set_notify_resume(struct task_struct *t)
{
if (t->rseq)
set_tsk_thread_flag(t, 1);
}

void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rseq_handle_notify_resume(struct ksignal *ksig,
struct pt_regs *regs)
{
if (get_current()->rseq)
__rseq_handle_notify_resume(ksig, regs);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rseq_signal_deliver(struct ksignal *ksig,
struct pt_regs *regs)
{
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
arch___set_bit(RSEQ_EVENT_SIGNAL_BIT, &get_current()->rseq_event_mask);
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
rseq_handle_notify_resume(ksig, regs);
}
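/* The two do { ... } while (0) pairs are expanded preempt_disable() /
 * preempt_enable(); presumably the non-atomic arch___set_bit() on
 * rseq_event_mask must not race with the scheduler's own rseq_preempt()
 * update of the same word. */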


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rseq_preempt(struct task_struct *t)
{
arch___set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
rseq_set_notify_resume(t);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rseq_migrate(struct task_struct *t)
{
arch___set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
rseq_set_notify_resume(t);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
if (clone_flags & 0x00000100) {
t->rseq = ((void *)0);
t->rseq_sig = 0;
t->rseq_event_mask = 0;
} else {
t->rseq = get_current()->rseq;
t->rseq_sig = get_current()->rseq_sig;
t->rseq_event_mask = get_current()->rseq_event_mask;
}
}
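/* 0x00000100 is CLONE_VM: threads sharing an address space start with a
 * cleared (NULL) rseq registration, otherwise the parent's registration
 * is inherited. */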

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rseq_execve(struct task_struct *t)
{
t->rseq = ((void *)0);
t->rseq_sig = 0;
t->rseq_event_mask = 0;
}
# 2335 "./include/linux/sched.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rseq_syscall(struct pt_regs *regs)
{
}



const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);

const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);

int sched_trace_rq_cpu(struct rq *rq);
int sched_trace_rq_cpu_capacity(struct rq *rq);
int sched_trace_rq_nr_running(struct rq *rq);

const struct cpumask *sched_trace_rd_span(struct root_domain *rd);







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sched_core_free(struct task_struct *tsk) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sched_core_fork(struct task_struct *p) { }
# 8 "./include/linux/sched/signal.h" 2
# 1 "./include/linux/sched/jobctl.h" 1






struct task_struct;
# 35 "./include/linux/sched/jobctl.h"
extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask);
extern void task_clear_jobctl_trapping(struct task_struct *task);
extern void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask);
# 9 "./include/linux/sched/signal.h" 2
# 1 "./include/linux/sched/task.h" 1
# 11 "./include/linux/sched/task.h"
# 1 "./include/linux/uaccess.h" 1




# 1 "./include/linux/fault-inject-usercopy.h" 1
# 18 "./include/linux/fault-inject-usercopy.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool should_fail_usercopy(void) { return false; }
# 6 "./include/linux/uaccess.h" 2





# 1 "./arch/riscv/include/asm/uaccess.h" 1
# 11 "./arch/riscv/include/asm/uaccess.h"
# 1 "./arch/riscv/include/asm/asm-extable.h" 1
# 29 "./arch/riscv/include/asm/asm-extable.h"
# 1 "./arch/riscv/include/asm/gpr-num.h" 1
# 30 "./arch/riscv/include/asm/asm-extable.h" 2
# 12 "./arch/riscv/include/asm/uaccess.h" 2
# 1 "./arch/riscv/include/asm/pgtable.h" 1
# 10 "./arch/riscv/include/asm/pgtable.h"
# 1 "./include/linux/sizes.h" 1
# 11 "./arch/riscv/include/asm/pgtable.h" 2

# 1 "./arch/riscv/include/asm/pgtable-bits.h" 1
# 13 "./arch/riscv/include/asm/pgtable.h" 2
# 108 "./arch/riscv/include/asm/pgtable.h"
# 1 "./arch/riscv/include/asm/tlbflush.h" 1
# 12 "./arch/riscv/include/asm/tlbflush.h"
# 1 "./arch/riscv/include/asm/errata_list.h" 1







# 1 "./arch/riscv/include/asm/alternative.h" 1
# 11 "./arch/riscv/include/asm/alternative.h"
# 1 "./arch/riscv/include/asm/alternative-macros.h" 1
# 12 "./arch/riscv/include/asm/alternative.h" 2






# 1 "./arch/riscv/include/asm/hwcap.h" 1
# 12 "./arch/riscv/include/asm/hwcap.h"
# 1 "./arch/riscv/include/uapi/asm/hwcap.h" 1
# 13 "./arch/riscv/include/asm/hwcap.h" 2








enum {
CAP_HWCAP = 1,
};

extern unsigned long elf_hwcap;
# 53 "./arch/riscv/include/asm/hwcap.h"
enum riscv_isa_ext_id {
RISCV_ISA_EXT_SSCOFPMF = 26,
RISCV_ISA_EXT_ID_MAX = 64,
};

struct riscv_isa_ext_data {

char uprop[32];

unsigned int isa_ext_id;
};

unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);



bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
# 19 "./arch/riscv/include/asm/alternative.h" 2

void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) apply_boot_alternatives(void);

struct alt_entry {
void *old_ptr;
void *alt_ptr;
unsigned long vendor_id;
unsigned long alt_len;
unsigned int errata_id;
} __attribute__((__packed__));
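/* Each alt_entry records one patch site: old_ptr/alt_ptr point at the
 * original and replacement code of length alt_len, selected at boot by
 * vendor_id and errata_id. */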

struct errata_checkfunc_id {
unsigned long vendor_id;
bool (*func)(struct alt_entry *alt);
};

void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid);
# 9 "./arch/riscv/include/asm/errata_list.h" 2
# 1 "./arch/riscv/include/asm/vendorid_list.h" 1
# 10 "./arch/riscv/include/asm/errata_list.h" 2
# 13 "./arch/riscv/include/asm/tlbflush.h" 2


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void local_flush_tlb_all(void)
{
__asm__ __volatile__ ("sfence.vma" : : : "memory");
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void local_flush_tlb_page(unsigned long addr)
{
asm("886 :\n" "sfence.vma %0" "\n" "887 :\n" ".if " "1" " == 1\n" ".pushsection .alternative, \"a\"\n" ".dword" " " "886b" "\n" ".dword" " " "888f" "\n" ".dword" " " "0x489" "\n" ".dword" " " "889f - 888f" "\n" ".word " "1" "\n" ".popsection\n" ".subsection 1\n" "888 :\n" "sfence.vma" "\n" "889 :\n" ".previous\n" ".org . - (887b - 886b) + (889b - 888b)\n" ".org . - (889b - 888b) + (887b - 886b)\n" ".endif\n" : : "r" (addr) : "memory");
}
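/* The asm above is the expanded ALTERNATIVE(): the default path issues
 * an address-specific "sfence.vma %0"; the .alternative entry, keyed on
 * vendor id 0x489 (SiFive) and errata id 1, patches in a full
 * "sfence.vma" instead, apparently the CIP-1200 workaround. */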






void flush_tlb_all(void);
void flush_tlb_mm(struct mm_struct *mm);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);


void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
# 56 "./arch/riscv/include/asm/tlbflush.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flush_tlb_kernel_range(unsigned long start,
unsigned long end)
{
flush_tlb_all();
}
# 109 "./arch/riscv/include/asm/pgtable.h" 2



# 1 "./arch/riscv/include/asm/pgtable-64.h" 1
# 11 "./arch/riscv/include/asm/pgtable-64.h"
extern bool pgtable_l4_enabled;
extern bool pgtable_l5_enabled;
# 41 "./arch/riscv/include/asm/pgtable-64.h"
typedef struct {
unsigned long p4d;
} p4d_t;






typedef struct {
unsigned long pud;
} pud_t;






typedef struct {
unsigned long pmd;
} pmd_t;






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pud_present(pud_t pud)
{
return (((pud).pud) & (1 << 0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pud_none(pud_t pud)
{
return (((pud).pud) == 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pud_bad(pud_t pud)
{
return !pud_present(pud);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pud_leaf(pud_t pud)
{
return pud_present(pud) && (((pud).pud) & ((1 << 1) | (1 << 2) | (1 << 3)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_pud(pud_t *pudp, pud_t pud)
{
*pudp = pud;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pud_clear(pud_t *pudp)
{
set_pud(pudp, ((pud_t) { (0) }));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pud_t pfn_pud(unsigned long pfn, pgprot_t prot)
{
return ((pud_t) { ((pfn << 10) | ((prot).pgprot)) });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long _pud_pfn(pud_t pud)
{
return ((pud).pud) >> 10;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t *pud_pgtable(pud_t pud)
{
return (pmd_t *)(((void *)((void *)((unsigned long)((phys_addr_t)((((phys_addr_t)(((pud).pud) >> 10) << (12))))) + kernel_map.va_pa_offset))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *pud_page(pud_t pud)
{
return (((struct page *)((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) ))))))) + (((pud).pud) >> 10));
}
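/* The expression above is evidently pfn_to_page(): the front half
 * computes the vmemmap base from the runtime paging mode, and the
 * __builtin_clzll/__ilog2 ladder is a constant-foldable
 * order_base_2(sizeof(struct page)); the trailing "+ (pud >> 10)" adds
 * the pfn. The same pattern recurs in p4d_page/pgd_page/pmd_page. */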


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mm_p4d_folded(struct mm_struct *mm)
{
if (pgtable_l5_enabled)
return false;

return true;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mm_pud_folded(struct mm_struct *mm)
{
if (pgtable_l4_enabled)
return false;

return true;
}
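/* pgtable_l4_enabled / pgtable_l5_enabled select Sv39/Sv48/Sv57 paging
 * at boot; when a level is disabled the corresponding p4d/pud level is
 * folded, and the helpers above and below degrade to pass-throughs. */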



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
{
return ((pmd_t) { ((pfn << 10) | ((prot).pgprot)) });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long _pmd_pfn(pmd_t pmd)
{
return ((pmd).pmd) >> 10;
}
# 160 "./arch/riscv/include/asm/pgtable-64.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
if (pgtable_l4_enabled)
*p4dp = p4d;
else
set_pud((pud_t *)p4dp, (pud_t){ ((p4d).p4d) });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int p4d_none(p4d_t p4d)
{
if (pgtable_l4_enabled)
return (((p4d).p4d) == 0);

return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int p4d_present(p4d_t p4d)
{
if (pgtable_l4_enabled)
return (((p4d).p4d) & (1 << 0));

return 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int p4d_bad(p4d_t p4d)
{
if (pgtable_l4_enabled)
return !p4d_present(p4d);

return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void p4d_clear(p4d_t *p4d)
{
if (pgtable_l4_enabled)
set_p4d(p4d, ((p4d_t) { (0) }));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) p4d_t pfn_p4d(unsigned long pfn, pgprot_t prot)
{
return ((p4d_t) { ((pfn << 10) | ((prot).pgprot)) });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long _p4d_pfn(p4d_t p4d)
{
return ((p4d).p4d) >> 10;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pud_t *p4d_pgtable(p4d_t p4d)
{
if (pgtable_l4_enabled)
return (pud_t *)(((void *)((void *)((unsigned long)((phys_addr_t)((((phys_addr_t)(((p4d).p4d) >> 10) << (12))))) + kernel_map.va_pa_offset))));

return (pud_t *)pud_pgtable((pud_t) { ((p4d).p4d) });
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *p4d_page(p4d_t p4d)
{
return (((struct page *)((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) ))))))) + (((p4d).p4d) >> 10));
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
if (pgtable_l4_enabled)
return p4d_pgtable(*p4d) + (((address) >> 30) & ((((1UL) << (12)) / sizeof(pud_t)) - 1));

return (pud_t *)p4d;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
if (pgtable_l5_enabled)
*pgdp = pgd;
else
set_p4d((p4d_t *)pgdp, (p4d_t){ ((pgd).pgd) });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pgd_none(pgd_t pgd)
{
if (pgtable_l5_enabled)
return (((pgd).pgd) == 0);

return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pgd_present(pgd_t pgd)
{
if (pgtable_l5_enabled)
return (((pgd).pgd) & (1 << 0));

return 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pgd_bad(pgd_t pgd)
{
if (pgtable_l5_enabled)
return !pgd_present(pgd);

return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pgd_clear(pgd_t *pgd)
{
if (pgtable_l5_enabled)
set_pgd(pgd, ((pgd_t) { (0) }));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) p4d_t *pgd_pgtable(pgd_t pgd)
{
if (pgtable_l5_enabled)
return (p4d_t *)(((void *)((void *)((unsigned long)((phys_addr_t)((((phys_addr_t)(((pgd).pgd) >> 10) << (12))))) + kernel_map.va_pa_offset))));

return (p4d_t *)p4d_pgtable((p4d_t) { ((pgd).pgd) });
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *pgd_page(pgd_t pgd)
{
return (((struct page *)((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) ))))))) + (((pgd).pgd) >> 10));
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
if (pgtable_l5_enabled)
return pgd_pgtable(*pgd) + (((address) >> 39) & ((((1UL) << (12)) / sizeof(p4d_t)) - 1));

return (p4d_t *)pgd;
}
# 113 "./arch/riscv/include/asm/pgtable.h" 2
# 129 "./arch/riscv/include/asm/pgtable.h"
struct pt_alloc_ops {
pte_t *(*get_pte_virt)(phys_addr_t pa);
phys_addr_t (*alloc_pte)(uintptr_t va);

pmd_t *(*get_pmd_virt)(phys_addr_t pa);
phys_addr_t (*alloc_pmd)(uintptr_t va);
pud_t *(*get_pud_virt)(phys_addr_t pa);
phys_addr_t (*alloc_pud)(uintptr_t va);
p4d_t *(*get_p4d_virt)(phys_addr_t pa);
phys_addr_t (*alloc_p4d)(uintptr_t va);

};

extern struct pt_alloc_ops pt_ops __attribute__((__section__(".init.data")));
# 186 "./arch/riscv/include/asm/pgtable.h"
extern pgd_t swapper_pg_dir[];
# 209 "./arch/riscv/include/asm/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_present(pmd_t pmd)
{






return (((pmd).pmd) & ((1 << 0) | (1 << 5) | ((1 << 1) | (1 << 2) | (1 << 3))));
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_none(pmd_t pmd)
{
return (((pmd).pmd) == 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_bad(pmd_t pmd)
{
return !pmd_present(pmd) || (((pmd).pmd) & ((1 << 1) | (1 << 2) | (1 << 3)));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_leaf(pmd_t pmd)
{
return pmd_present(pmd) && (((pmd).pmd) & ((1 << 1) | (1 << 2) | (1 << 3)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pmd_clear(pmd_t *pmdp)
{
set_pmd(pmdp, ((pmd_t) { (0) }));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
return ((pgd_t) { ((pfn << 10) | ((prot).pgprot)) });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long _pgd_pfn(pgd_t pgd)
{
return ((pgd).pgd) >> 10;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *pmd_page(pmd_t pmd)
{
return (((struct page *)((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) ))))))) + (((pmd).pmd) >> 10));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long pmd_page_vaddr(pmd_t pmd)
{
return (unsigned long)(((void *)((void *)((unsigned long)((phys_addr_t)((((phys_addr_t)(((pmd).pmd) >> 10) << (12))))) + kernel_map.va_pa_offset))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t pmd_pte(pmd_t pmd)
{
return ((pte_t) { (((pmd).pmd)) });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t pud_pte(pud_t pud)
{
return ((pte_t) { (((pud).pud)) });
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long pte_pfn(pte_t pte)
{
return (((pte).pte) >> 10);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
return ((pte_t) { ((pfn << 10) | ((prot).pgprot)) });
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pte_present(pte_t pte)
{
return (((pte).pte) & ((1 << 0) | (1 << 5)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pte_none(pte_t pte)
{
return (((pte).pte) == 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pte_write(pte_t pte)
{
return ((pte).pte) & (1 << 2);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pte_exec(pte_t pte)
{
return ((pte).pte) & (1 << 3);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pte_huge(pte_t pte)
{
return pte_present(pte) && (((pte).pte) & ((1 << 1) | (1 << 2) | (1 << 3)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pte_dirty(pte_t pte)
{
return ((pte).pte) & (1 << 7);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pte_young(pte_t pte)
{
return ((pte).pte) & (1 << 6);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pte_special(pte_t pte)
{
return ((pte).pte) & (1 << 8);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t pte_wrprotect(pte_t pte)
{
return ((pte_t) { (((pte).pte) & ~((1 << 2))) });
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t pte_mkwrite(pte_t pte)
{
return ((pte_t) { (((pte).pte) | (1 << 2)) });
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t pte_mkdirty(pte_t pte)
{
return ((pte_t) { (((pte).pte) | (1 << 7)) });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t pte_mkclean(pte_t pte)
{
return ((pte_t) { (((pte).pte) & ~((1 << 7))) });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t pte_mkyoung(pte_t pte)
{
return ((pte_t) { (((pte).pte) | (1 << 6)) });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t pte_mkold(pte_t pte)
{
return ((pte_t) { (((pte).pte) & ~((1 << 6))) });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t pte_mkspecial(pte_t pte)
{
return ((pte_t) { (((pte).pte) | (1 << 8)) });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t pte_mkhuge(pte_t pte)
{
return pte;
}
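/* The bit positions used by these pte helpers match the RISC-V PTE
 * layout: 0=V, 1=R, 2=W, 3=X, 4=U, 5=G, 6=A(ccessed), 7=D(irty), with
 * software bit 8 used as _PAGE_SPECIAL. Bit 5 doubles as PROT_NONE in
 * the present() checks. The pfn sits at bit 10 and up, hence the
 * >> 10 / << 10 shifts throughout. */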
# 400 "./arch/riscv/include/asm/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return ((pte_t) { ((((pte).pte) & (~(unsigned long)((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4) | (1 << 5)))) | ((newprot).pgprot)) });
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{







local_flush_tlb_page(address);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void update_mmu_cache_pmd(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
pte_t *ptep = (pte_t *)pmdp;

update_mmu_cache(vma, address, ptep);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pte_same(pte_t pte_a, pte_t pte_b)
{
return ((pte_a).pte) == ((pte_b).pte);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_pte(pte_t *ptep, pte_t pteval)
{
*ptep = pteval;
}

void flush_icache_pte(pte_t pte);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_pte_at(struct mm_struct *mm,
unsigned long addr, pte_t *ptep, pte_t pteval)
{
if (pte_present(pteval) && pte_exec(pteval))
flush_icache_pte(pteval);

set_pte(ptep, pteval);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pte_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
set_pte_at(mm, addr, ptep, ((pte_t) { (0) }));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty)
{
if (!pte_same(*ptep, entry))
set_pte_at(vma->vm_mm, address, ptep, entry);




return true;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
return ((pte_t) { (atomic_long_xchg((atomic_long_t *)ptep, 0)) });
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address,
pte_t *ptep)
{
if (!pte_young(*ptep))
return 0;
return test_and_clear_bit(6, &((*ptep).pte));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ptep_set_wrprotect(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
atomic_long_and(~(unsigned long)(1 << 2), (atomic_long_t *)ptep);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
# 521 "./arch/riscv/include/asm/pgtable.h"
return ptep_test_and_clear_young(vma, address, ptep);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pte_pmd(pte_t pte)
{
return ((pmd_t) { (((pte).pte)) });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pmd_mkhuge(pmd_t pmd)
{
return pmd;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pmd_mkinvalid(pmd_t pmd)
{
return ((pmd_t) { (((pmd).pmd) & ~((1 << 0)|(1 << 5))) });
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long pmd_pfn(pmd_t pmd)
{
return (((((pmd).pmd) >> 10 << (12)) & (~(((1UL) << 21) - 1))) >> (12));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_write(pmd_t pmd)
{
return pte_write(pmd_pte(pmd));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_dirty(pmd_t pmd)
{
return pte_dirty(pmd_pte(pmd));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_young(pmd_t pmd)
{
return pte_young(pmd_pte(pmd));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pmd_mkold(pmd_t pmd)
{
return pte_pmd(pte_mkold(pmd_pte(pmd)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pmd_mkyoung(pmd_t pmd)
{
return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pmd_mkwrite(pmd_t pmd)
{
return pte_pmd(pte_mkwrite(pmd_pte(pmd)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pmd_wrprotect(pmd_t pmd)
{
return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pmd_mkclean(pmd_t pmd)
{
return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pmd_mkdirty(pmd_t pmd)
{
return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
return set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_pud_at(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud)
{
return set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_trans_huge(pmd_t pmd)
{
return pmd_leaf(pmd);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
pmd_t entry, int dirty)
{
return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pmdp_set_wrprotect(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pmdp_establish(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
return ((pmd_t) { (atomic_long_xchg((atomic_long_t *)pmdp, ((pmd).pmd))) });
}
# 730 "./arch/riscv/include/asm/pgtable.h"
extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;







extern u64 satp_mode;
extern bool pgtable_l4_enabled;

void paging_init(void);
void misc_mem_init(void);





extern unsigned long empty_zero_page[((1UL) << (12)) / sizeof(unsigned long)];
# 13 "./arch/riscv/include/asm/uaccess.h" 2
# 22 "./arch/riscv/include/asm/uaccess.h"
# 1 "./arch/riscv/include/asm/extable.h" 1
# 18 "./arch/riscv/include/asm/extable.h"
struct exception_table_entry {
int insn, fixup;
short type, data;
};
# 35 "./arch/riscv/include/asm/extable.h"
bool fixup_exception(struct pt_regs *regs);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
return false;
}
# 23 "./arch/riscv/include/asm/uaccess.h" 2

# 1 "./include/asm-generic/access_ok.h" 1
# 31 "./include/asm-generic/access_ok.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __access_ok(const void *ptr, unsigned long size)
{
unsigned long limit = (((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30))) * (((1UL) << (12)) / sizeof(pgd_t)) / 2);
unsigned long addr = (unsigned long)ptr;

if (0 ||
!1)
return true;

return (size <= limit) && (addr <= (limit - size));
}
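/* limit is the constant-folded TASK_SIZE for the active paging mode;
 * the "if (0 || !1)" arm is what remains of the IS_ENABLED() config
 * checks after preprocessing (CONFIG_MMU=y here, so it never fires). */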
# 25 "./arch/riscv/include/asm/uaccess.h" 2
# 288 "./arch/riscv/include/asm/uaccess.h"
unsigned long __attribute__((__warn_unused_result__)) __asm_copy_to_user(void *to,
const void *from, unsigned long n);
unsigned long __attribute__((__warn_unused_result__)) __asm_copy_from_user(void *to,
const void *from, unsigned long n);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long
raw_copy_from_user(void *to, const void *from, unsigned long n)
{
return __asm_copy_from_user(to, from, n);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long
raw_copy_to_user(void *to, const void *from, unsigned long n)
{
return __asm_copy_to_user(to, from, n);
}

extern long strncpy_from_user(char *dest, const char *src, long count);

extern long __attribute__((__warn_unused_result__)) strnlen_user(const char *str, long n);

extern
unsigned long __attribute__((__warn_unused_result__)) __clear_user(void *addr, unsigned long n);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
unsigned long __attribute__((__warn_unused_result__)) clear_user(void *to, unsigned long n)
{
__might_fault("arch/riscv/include/asm/uaccess.h", 315);
return __builtin_expect(!!(__access_ok(to, n)), 1) ?
__clear_user(to, n) : n;
}
# 12 "./include/linux/uaccess.h" 2
# 58 "./include/linux/uaccess.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) unsigned long
__copy_from_user_inatomic(void *to, const void *from, unsigned long n)
{
instrument_copy_from_user(to, from, n);
check_object_size(to, n, false);
return raw_copy_from_user(to, from, n);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) unsigned long
__copy_from_user(void *to, const void *from, unsigned long n)
{
__might_fault("include/linux/uaccess.h", 69);
if (should_fail_usercopy())
return n;
instrument_copy_from_user(to, from, n);
check_object_size(to, n, false);
return raw_copy_from_user(to, from, n);
}
# 90 "./include/linux/uaccess.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) unsigned long
__copy_to_user_inatomic(void *to, const void *from, unsigned long n)
{
if (should_fail_usercopy())
return n;
instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) unsigned long
__copy_to_user(void *to, const void *from, unsigned long n)
{
__might_fault("include/linux/uaccess.h", 103);
if (should_fail_usercopy())
return n;
instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
# 126 "./include/linux/uaccess.h"
extern __attribute__((__warn_unused_result__)) unsigned long
_copy_from_user(void *, const void *, unsigned long);
# 144 "./include/linux/uaccess.h"
extern __attribute__((__warn_unused_result__)) unsigned long
_copy_to_user(void *, const void *, unsigned long);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned long __attribute__((__warn_unused_result__))
copy_from_user(void *to, const void *from, unsigned long n)
{
if (__builtin_expect(!!(check_copy_size(to, n, false)), 1))
n = _copy_from_user(to, from, n);
return n;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned long __attribute__((__warn_unused_result__))
copy_to_user(void *to, const void *from, unsigned long n)
{
if (__builtin_expect(!!(check_copy_size(from, n, true)), 1))
n = _copy_to_user(to, from, n);
return n;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long __attribute__((__warn_unused_result__))
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
memcpy(dst, src, cnt);
return 0;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void pagefault_disabled_inc(void)
{
get_current()->pagefault_disabled++;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void pagefault_disabled_dec(void)
{
get_current()->pagefault_disabled--;
}
# 194 "./include/linux/uaccess.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pagefault_disable(void)
{
pagefault_disabled_inc();




__asm__ __volatile__("": : :"memory");
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pagefault_enable(void)
{




__asm__ __volatile__("": : :"memory");
pagefault_disabled_dec();
}
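/*
 * pagefault_disable()/pagefault_enable(): the empty volatile asm with a
 * "memory" clobber is the expansion of barrier(); it orders the
 * pagefault_disabled counter update against the code it protects without
 * emitting any instructions.
 */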




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool pagefault_disabled(void)
{
return get_current()->pagefault_disabled != 0;
}
# 236 "./include/linux/uaccess.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__)) unsigned long
__copy_from_user_inatomic_nocache(void *to, const void *from,
unsigned long n)
{
return __copy_from_user_inatomic(to, from, n);
}



extern __attribute__((__warn_unused_result__)) int check_zeroed_user(const void *from, size_t size);
# 294 "./include/linux/uaccess.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) int
copy_struct_from_user(void *dst, size_t ksize, const void *src,
size_t usize)
{
size_t size = __builtin_choose_expr(((!!(sizeof((typeof(ksize) *)1 == (typeof(usize) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(ksize) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(usize) * 0l)) : (int *)8))))), ((ksize) < (usize) ? (ksize) : (usize)), ({ typeof(ksize) __UNIQUE_ID___x178 = (ksize); typeof(usize) __UNIQUE_ID___y179 = (usize); ((__UNIQUE_ID___x178) < (__UNIQUE_ID___y179) ? (__UNIQUE_ID___x178) : (__UNIQUE_ID___y179)); }));
size_t rest = __builtin_choose_expr(((!!(sizeof((typeof(ksize) *)1 == (typeof(usize) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(ksize) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(usize) * 0l)) : (int *)8))))), ((ksize) > (usize) ? (ksize) : (usize)), ({ typeof(ksize) __UNIQUE_ID___x180 = (ksize); typeof(usize) __UNIQUE_ID___y181 = (usize); ((__UNIQUE_ID___x180) > (__UNIQUE_ID___y181) ? (__UNIQUE_ID___x180) : (__UNIQUE_ID___y181)); })) - size;


if (usize < ksize) {
memset(dst + size, 0, rest);
} else if (usize > ksize) {
int ret = check_zeroed_user(src + size, rest);
if (ret <= 0)
return ret ?: -7;
}

if (copy_from_user(dst, src, size))
return -14;
return 0;
}
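/*
 * copy_struct_from_user(): the two __builtin_choose_expr() blobs above are
 * the expanded min(ksize, usize) and max(ksize, usize) macros, so
 * size = min(ksize, usize) and rest = |ksize - usize|.  A shorter userspace
 * struct gets its kernel-side tail zeroed; a longer one is accepted only if
 * the extra user bytes are zero (check_zeroed_user() returns 1 for all-zero,
 * 0 when a nonzero byte is found, negative on fault).  The bare constants
 * are expanded errnos: -7 is -E2BIG, -14 is -EFAULT.
 */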

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long __attribute__((patchable_function_entry(0, 0))) copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void *src, size_t size);
long __attribute__((patchable_function_entry(0, 0))) copy_to_user_nofault(void *dst, const void *src,
size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
long count);

long strncpy_from_user_nofault(char *dst, const void *unsafe_addr,
long count);
long strnlen_user_nofault(const void *unsafe_addr, long count);
# 370 "./include/linux/uaccess.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long user_access_save(void) { return 0UL; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void user_access_restore(unsigned long flags) { }
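/*
 * user_access_save()/user_access_restore() are the no-op stubs used when the
 * architecture has no SMAP-style user-access state to save and restore;
 * RISC-V appears to take this path in this configuration.
 */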
# 12 "./include/linux/sched/task.h" 2

struct task_struct;
struct rusage;
union thread_union;
struct css_set;




struct kernel_clone_args {
u64 flags;
int *pidfd;
int *child_tid;
int *parent_tid;
int exit_signal;
unsigned long stack;
unsigned long stack_size;
unsigned long tls;
pid_t *set_tid;

size_t set_tid_size;
int cgroup;
int io_thread;
struct cgroup *cgrp;
struct css_set *cset;
};
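/*
 * kernel_clone_args bundles every parameter of kernel_clone(); it mirrors
 * the userspace clone3() argument structure plus kernel-internal fields such
 * as io_thread and the cgroup pointers.
 */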







extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern int lockdep_tasklist_lock_is_held(void);

extern void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);

extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);

void __attribute__((__noreturn__)) do_task_dead(void);
void __attribute__((__noreturn__)) make_task_dead(int signr);

extern void proc_caches_init(void);

extern void fork_init(void);

extern void release_task(struct task_struct * p);

extern int copy_thread(unsigned long, unsigned long, unsigned long,
struct task_struct *, unsigned long);

extern void flush_thread(void);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void exit_thread(struct task_struct *tsk)
{
}

extern __attribute__((__noreturn__)) void do_group_exit(int);

extern void exit_files(struct task_struct *);
extern void exit_itimers(struct signal_struct *);

extern pid_t kernel_clone(struct kernel_clone_args *kargs);
struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
struct task_struct *fork_idle(int);
struct mm_struct *copy_init_mm(void);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int *, int, struct rusage *);
int kernel_wait(pid_t pid, int *stat);

extern void free_task(struct task_struct *tsk);



extern void sched_exec(void);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct task_struct *get_task_struct(struct task_struct *t)
{
refcount_inc(&t->usage);
return t;
}

extern void __put_task_struct(struct task_struct *t);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_task_struct(struct task_struct *t)
{
if (refcount_dec_and_test(&t->usage))
__put_task_struct(t);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_task_struct_many(struct task_struct *t, int nr)
{
if (refcount_sub_and_test(nr, &t->usage))
__put_task_struct(t);
}

void put_task_struct_rcu_user(struct task_struct *task);
# 147 "./include/linux/sched/task.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
return t->stack_vm_area;
}
# 168 "./include/linux/sched/task.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void task_lock(struct task_struct *p)
{
spin_lock(&p->alloc_lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void task_unlock(struct task_struct *p)
{
spin_unlock(&p->alloc_lock);
}
# 10 "./include/linux/sched/signal.h" 2
# 1 "./include/linux/cred.h" 1
# 13 "./include/linux/cred.h"
# 1 "./include/linux/key.h" 1
# 17 "./include/linux/key.h"
# 1 "./include/linux/sysctl.h" 1
# 30 "./include/linux/sysctl.h"
# 1 "./include/uapi/linux/sysctl.h" 1
# 35 "./include/uapi/linux/sysctl.h"
struct __sysctl_args {
int *name;
int nlen;
void *oldval;
size_t *oldlenp;
void *newval;
size_t newlen;
unsigned long __unused[4];
};
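/*
 * Everything from here to the end of the uapi sysctl header is the frozen
 * name space of the legacy binary sysctl(2) interface (CTL_*, KERN_*, VM_*,
 * NET_*, FS_*, DEV_*, ...).  The numeric values are ABI, which is why they
 * are sparse and out of order; modern kernels expose only the /proc/sys
 * text interface.
 */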





enum
{
CTL_KERN=1,
CTL_VM=2,
CTL_NET=3,
CTL_PROC=4,
CTL_FS=5,
CTL_DEBUG=6,
CTL_DEV=7,
CTL_BUS=8,
CTL_ABI=9,
CTL_CPU=10,
CTL_ARLAN=254,
CTL_S390DBF=5677,
CTL_SUNRPC=7249,
CTL_PM=9899,
CTL_FRV=9898,
};


enum
{
CTL_BUS_ISA=1
};


enum
{
INOTIFY_MAX_USER_INSTANCES=1,
INOTIFY_MAX_USER_WATCHES=2,
INOTIFY_MAX_QUEUED_EVENTS=3
};


enum
{
KERN_OSTYPE=1,
KERN_OSRELEASE=2,
KERN_OSREV=3,
KERN_VERSION=4,
KERN_SECUREMASK=5,
KERN_PROF=6,
KERN_NODENAME=7,
KERN_DOMAINNAME=8,

KERN_PANIC=15,
KERN_REALROOTDEV=16,

KERN_SPARC_REBOOT=21,
KERN_CTLALTDEL=22,
KERN_PRINTK=23,
KERN_NAMETRANS=24,
KERN_PPC_HTABRECLAIM=25,
KERN_PPC_ZEROPAGED=26,
KERN_PPC_POWERSAVE_NAP=27,
KERN_MODPROBE=28,
KERN_SG_BIG_BUFF=29,
KERN_ACCT=30,
KERN_PPC_L2CR=31,

KERN_RTSIGNR=32,
KERN_RTSIGMAX=33,

KERN_SHMMAX=34,
KERN_MSGMAX=35,
KERN_MSGMNB=36,
KERN_MSGPOOL=37,
KERN_SYSRQ=38,
KERN_MAX_THREADS=39,
KERN_RANDOM=40,
KERN_SHMALL=41,
KERN_MSGMNI=42,
KERN_SEM=43,
KERN_SPARC_STOP_A=44,
KERN_SHMMNI=45,
KERN_OVERFLOWUID=46,
KERN_OVERFLOWGID=47,
KERN_SHMPATH=48,
KERN_HOTPLUG=49,
KERN_IEEE_EMULATION_WARNINGS=50,
KERN_S390_USER_DEBUG_LOGGING=51,
KERN_CORE_USES_PID=52,
KERN_TAINTED=53,
KERN_CADPID=54,
KERN_PIDMAX=55,
KERN_CORE_PATTERN=56,
KERN_PANIC_ON_OOPS=57,
KERN_HPPA_PWRSW=58,
KERN_HPPA_UNALIGNED=59,
KERN_PRINTK_RATELIMIT=60,
KERN_PRINTK_RATELIMIT_BURST=61,
KERN_PTY=62,
KERN_NGROUPS_MAX=63,
KERN_SPARC_SCONS_PWROFF=64,
KERN_HZ_TIMER=65,
KERN_UNKNOWN_NMI_PANIC=66,
KERN_BOOTLOADER_TYPE=67,
KERN_RANDOMIZE=68,
KERN_SETUID_DUMPABLE=69,
KERN_SPIN_RETRY=70,
KERN_ACPI_VIDEO_FLAGS=71,
KERN_IA64_UNALIGNED=72,
KERN_COMPAT_LOG=73,
KERN_MAX_LOCK_DEPTH=74,
KERN_NMI_WATCHDOG=75,
KERN_PANIC_ON_NMI=76,
KERN_PANIC_ON_WARN=77,
KERN_PANIC_PRINT=78,
};




enum
{
VM_UNUSED1=1,
VM_UNUSED2=2,
VM_UNUSED3=3,
VM_UNUSED4=4,
VM_OVERCOMMIT_MEMORY=5,
VM_UNUSED5=6,
VM_UNUSED7=7,
VM_UNUSED8=8,
VM_UNUSED9=9,
VM_PAGE_CLUSTER=10,
VM_DIRTY_BACKGROUND=11,
VM_DIRTY_RATIO=12,
VM_DIRTY_WB_CS=13,
VM_DIRTY_EXPIRE_CS=14,
VM_NR_PDFLUSH_THREADS=15,
VM_OVERCOMMIT_RATIO=16,
VM_PAGEBUF=17,
VM_HUGETLB_PAGES=18,
VM_SWAPPINESS=19,
VM_LOWMEM_RESERVE_RATIO=20,
VM_MIN_FREE_KBYTES=21,
VM_MAX_MAP_COUNT=22,
VM_LAPTOP_MODE=23,
VM_BLOCK_DUMP=24,
VM_HUGETLB_GROUP=25,
VM_VFS_CACHE_PRESSURE=26,
VM_LEGACY_VA_LAYOUT=27,
VM_SWAP_TOKEN_TIMEOUT=28,
VM_DROP_PAGECACHE=29,
VM_PERCPU_PAGELIST_FRACTION=30,
VM_ZONE_RECLAIM_MODE=31,
VM_MIN_UNMAPPED=32,
VM_PANIC_ON_OOM=33,
VM_VDSO_ENABLED=34,
VM_MIN_SLAB=35,
};



enum
{
NET_CORE=1,
NET_ETHER=2,
NET_802=3,
NET_UNIX=4,
NET_IPV4=5,
NET_IPX=6,
NET_ATALK=7,
NET_NETROM=8,
NET_AX25=9,
NET_BRIDGE=10,
NET_ROSE=11,
NET_IPV6=12,
NET_X25=13,
NET_TR=14,
NET_DECNET=15,
NET_ECONET=16,
NET_SCTP=17,
NET_LLC=18,
NET_NETFILTER=19,
NET_DCCP=20,
NET_IRDA=412,
};


enum
{
RANDOM_POOLSIZE=1,
RANDOM_ENTROPY_COUNT=2,
RANDOM_READ_THRESH=3,
RANDOM_WRITE_THRESH=4,
RANDOM_BOOT_ID=5,
RANDOM_UUID=6
};


enum
{
PTY_MAX=1,
PTY_NR=2
};


enum
{
BUS_ISA_MEM_BASE=1,
BUS_ISA_PORT_BASE=2,
BUS_ISA_PORT_SHIFT=3
};


enum
{
NET_CORE_WMEM_MAX=1,
NET_CORE_RMEM_MAX=2,
NET_CORE_WMEM_DEFAULT=3,
NET_CORE_RMEM_DEFAULT=4,

NET_CORE_MAX_BACKLOG=6,
NET_CORE_FASTROUTE=7,
NET_CORE_MSG_COST=8,
NET_CORE_MSG_BURST=9,
NET_CORE_OPTMEM_MAX=10,
NET_CORE_HOT_LIST_LENGTH=11,
NET_CORE_DIVERT_VERSION=12,
NET_CORE_NO_CONG_THRESH=13,
NET_CORE_NO_CONG=14,
NET_CORE_LO_CONG=15,
NET_CORE_MOD_CONG=16,
NET_CORE_DEV_WEIGHT=17,
NET_CORE_SOMAXCONN=18,
NET_CORE_BUDGET=19,
NET_CORE_AEVENT_ETIME=20,
NET_CORE_AEVENT_RSEQTH=21,
NET_CORE_WARNINGS=22,
};







enum
{
NET_UNIX_DESTROY_DELAY=1,
NET_UNIX_DELETE_DELAY=2,
NET_UNIX_MAX_DGRAM_QLEN=3,
};


enum
{
NET_NF_CONNTRACK_MAX=1,
NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2,
NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3,
NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4,
NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5,
NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6,
NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7,
NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8,
NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9,
NET_NF_CONNTRACK_UDP_TIMEOUT=10,
NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11,
NET_NF_CONNTRACK_ICMP_TIMEOUT=12,
NET_NF_CONNTRACK_GENERIC_TIMEOUT=13,
NET_NF_CONNTRACK_BUCKETS=14,
NET_NF_CONNTRACK_LOG_INVALID=15,
NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16,
NET_NF_CONNTRACK_TCP_LOOSE=17,
NET_NF_CONNTRACK_TCP_BE_LIBERAL=18,
NET_NF_CONNTRACK_TCP_MAX_RETRANS=19,
NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20,
NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21,
NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22,
NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23,
NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24,
NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25,
NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26,
NET_NF_CONNTRACK_COUNT=27,
NET_NF_CONNTRACK_ICMPV6_TIMEOUT=28,
NET_NF_CONNTRACK_FRAG6_TIMEOUT=29,
NET_NF_CONNTRACK_FRAG6_LOW_THRESH=30,
NET_NF_CONNTRACK_FRAG6_HIGH_THRESH=31,
NET_NF_CONNTRACK_CHECKSUM=32,
};


enum
{

NET_IPV4_FORWARD=8,
NET_IPV4_DYNADDR=9,

NET_IPV4_CONF=16,
NET_IPV4_NEIGH=17,
NET_IPV4_ROUTE=18,
NET_IPV4_FIB_HASH=19,
NET_IPV4_NETFILTER=20,

NET_IPV4_TCP_TIMESTAMPS=33,
NET_IPV4_TCP_WINDOW_SCALING=34,
NET_IPV4_TCP_SACK=35,
NET_IPV4_TCP_RETRANS_COLLAPSE=36,
NET_IPV4_DEFAULT_TTL=37,
NET_IPV4_AUTOCONFIG=38,
NET_IPV4_NO_PMTU_DISC=39,
NET_IPV4_TCP_SYN_RETRIES=40,
NET_IPV4_IPFRAG_HIGH_THRESH=41,
NET_IPV4_IPFRAG_LOW_THRESH=42,
NET_IPV4_IPFRAG_TIME=43,
NET_IPV4_TCP_MAX_KA_PROBES=44,
NET_IPV4_TCP_KEEPALIVE_TIME=45,
NET_IPV4_TCP_KEEPALIVE_PROBES=46,
NET_IPV4_TCP_RETRIES1=47,
NET_IPV4_TCP_RETRIES2=48,
NET_IPV4_TCP_FIN_TIMEOUT=49,
NET_IPV4_IP_MASQ_DEBUG=50,
NET_TCP_SYNCOOKIES=51,
NET_TCP_STDURG=52,
NET_TCP_RFC1337=53,
NET_TCP_SYN_TAILDROP=54,
NET_TCP_MAX_SYN_BACKLOG=55,
NET_IPV4_LOCAL_PORT_RANGE=56,
NET_IPV4_ICMP_ECHO_IGNORE_ALL=57,
NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS=58,
NET_IPV4_ICMP_SOURCEQUENCH_RATE=59,
NET_IPV4_ICMP_DESTUNREACH_RATE=60,
NET_IPV4_ICMP_TIMEEXCEED_RATE=61,
NET_IPV4_ICMP_PARAMPROB_RATE=62,
NET_IPV4_ICMP_ECHOREPLY_RATE=63,
NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES=64,
NET_IPV4_IGMP_MAX_MEMBERSHIPS=65,
NET_TCP_TW_RECYCLE=66,
NET_IPV4_ALWAYS_DEFRAG=67,
NET_IPV4_TCP_KEEPALIVE_INTVL=68,
NET_IPV4_INET_PEER_THRESHOLD=69,
NET_IPV4_INET_PEER_MINTTL=70,
NET_IPV4_INET_PEER_MAXTTL=71,
NET_IPV4_INET_PEER_GC_MINTIME=72,
NET_IPV4_INET_PEER_GC_MAXTIME=73,
NET_TCP_ORPHAN_RETRIES=74,
NET_TCP_ABORT_ON_OVERFLOW=75,
NET_TCP_SYNACK_RETRIES=76,
NET_TCP_MAX_ORPHANS=77,
NET_TCP_MAX_TW_BUCKETS=78,
NET_TCP_FACK=79,
NET_TCP_REORDERING=80,
NET_TCP_ECN=81,
NET_TCP_DSACK=82,
NET_TCP_MEM=83,
NET_TCP_WMEM=84,
NET_TCP_RMEM=85,
NET_TCP_APP_WIN=86,
NET_TCP_ADV_WIN_SCALE=87,
NET_IPV4_NONLOCAL_BIND=88,
NET_IPV4_ICMP_RATELIMIT=89,
NET_IPV4_ICMP_RATEMASK=90,
NET_TCP_TW_REUSE=91,
NET_TCP_FRTO=92,
NET_TCP_LOW_LATENCY=93,
NET_IPV4_IPFRAG_SECRET_INTERVAL=94,
NET_IPV4_IGMP_MAX_MSF=96,
NET_TCP_NO_METRICS_SAVE=97,
NET_TCP_DEFAULT_WIN_SCALE=105,
NET_TCP_MODERATE_RCVBUF=106,
NET_TCP_TSO_WIN_DIVISOR=107,
NET_TCP_BIC_BETA=108,
NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR=109,
NET_TCP_CONG_CONTROL=110,
NET_TCP_ABC=111,
NET_IPV4_IPFRAG_MAX_DIST=112,
NET_TCP_MTU_PROBING=113,
NET_TCP_BASE_MSS=114,
NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS=115,
NET_TCP_DMA_COPYBREAK=116,
NET_TCP_SLOW_START_AFTER_IDLE=117,
NET_CIPSOV4_CACHE_ENABLE=118,
NET_CIPSOV4_CACHE_BUCKET_SIZE=119,
NET_CIPSOV4_RBM_OPTFMT=120,
NET_CIPSOV4_RBM_STRICTVALID=121,
NET_TCP_AVAIL_CONG_CONTROL=122,
NET_TCP_ALLOWED_CONG_CONTROL=123,
NET_TCP_MAX_SSTHRESH=124,
NET_TCP_FRTO_RESPONSE=125,
};

enum {
NET_IPV4_ROUTE_FLUSH=1,
NET_IPV4_ROUTE_MIN_DELAY=2,
NET_IPV4_ROUTE_MAX_DELAY=3,
NET_IPV4_ROUTE_GC_THRESH=4,
NET_IPV4_ROUTE_MAX_SIZE=5,
NET_IPV4_ROUTE_GC_MIN_INTERVAL=6,
NET_IPV4_ROUTE_GC_TIMEOUT=7,
NET_IPV4_ROUTE_GC_INTERVAL=8,
NET_IPV4_ROUTE_REDIRECT_LOAD=9,
NET_IPV4_ROUTE_REDIRECT_NUMBER=10,
NET_IPV4_ROUTE_REDIRECT_SILENCE=11,
NET_IPV4_ROUTE_ERROR_COST=12,
NET_IPV4_ROUTE_ERROR_BURST=13,
NET_IPV4_ROUTE_GC_ELASTICITY=14,
NET_IPV4_ROUTE_MTU_EXPIRES=15,
NET_IPV4_ROUTE_MIN_PMTU=16,
NET_IPV4_ROUTE_MIN_ADVMSS=17,
NET_IPV4_ROUTE_SECRET_INTERVAL=18,
NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS=19,
};

enum
{
NET_PROTO_CONF_ALL=-2,
NET_PROTO_CONF_DEFAULT=-3


};

enum
{
NET_IPV4_CONF_FORWARDING=1,
NET_IPV4_CONF_MC_FORWARDING=2,
NET_IPV4_CONF_PROXY_ARP=3,
NET_IPV4_CONF_ACCEPT_REDIRECTS=4,
NET_IPV4_CONF_SECURE_REDIRECTS=5,
NET_IPV4_CONF_SEND_REDIRECTS=6,
NET_IPV4_CONF_SHARED_MEDIA=7,
NET_IPV4_CONF_RP_FILTER=8,
NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE=9,
NET_IPV4_CONF_BOOTP_RELAY=10,
NET_IPV4_CONF_LOG_MARTIANS=11,
NET_IPV4_CONF_TAG=12,
NET_IPV4_CONF_ARPFILTER=13,
NET_IPV4_CONF_MEDIUM_ID=14,
NET_IPV4_CONF_NOXFRM=15,
NET_IPV4_CONF_NOPOLICY=16,
NET_IPV4_CONF_FORCE_IGMP_VERSION=17,
NET_IPV4_CONF_ARP_ANNOUNCE=18,
NET_IPV4_CONF_ARP_IGNORE=19,
NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
NET_IPV4_CONF_ARP_ACCEPT=21,
NET_IPV4_CONF_ARP_NOTIFY=22,
NET_IPV4_CONF_ARP_EVICT_NOCARRIER=23,
};


enum
{
NET_IPV4_NF_CONNTRACK_MAX=1,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9,
NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT=10,
NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11,
NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT=12,
NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT=13,
NET_IPV4_NF_CONNTRACK_BUCKETS=14,
NET_IPV4_NF_CONNTRACK_LOG_INVALID=15,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16,
NET_IPV4_NF_CONNTRACK_TCP_LOOSE=17,
NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL=18,
NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS=19,
NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20,
NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21,
NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22,
NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23,
NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24,
NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25,
NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26,
NET_IPV4_NF_CONNTRACK_COUNT=27,
NET_IPV4_NF_CONNTRACK_CHECKSUM=28,
};


enum {
NET_IPV6_CONF=16,
NET_IPV6_NEIGH=17,
NET_IPV6_ROUTE=18,
NET_IPV6_ICMP=19,
NET_IPV6_BINDV6ONLY=20,
NET_IPV6_IP6FRAG_HIGH_THRESH=21,
NET_IPV6_IP6FRAG_LOW_THRESH=22,
NET_IPV6_IP6FRAG_TIME=23,
NET_IPV6_IP6FRAG_SECRET_INTERVAL=24,
NET_IPV6_MLD_MAX_MSF=25,
};

enum {
NET_IPV6_ROUTE_FLUSH=1,
NET_IPV6_ROUTE_GC_THRESH=2,
NET_IPV6_ROUTE_MAX_SIZE=3,
NET_IPV6_ROUTE_GC_MIN_INTERVAL=4,
NET_IPV6_ROUTE_GC_TIMEOUT=5,
NET_IPV6_ROUTE_GC_INTERVAL=6,
NET_IPV6_ROUTE_GC_ELASTICITY=7,
NET_IPV6_ROUTE_MTU_EXPIRES=8,
NET_IPV6_ROUTE_MIN_ADVMSS=9,
NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS=10
};

enum {
NET_IPV6_FORWARDING=1,
NET_IPV6_HOP_LIMIT=2,
NET_IPV6_MTU=3,
NET_IPV6_ACCEPT_RA=4,
NET_IPV6_ACCEPT_REDIRECTS=5,
NET_IPV6_AUTOCONF=6,
NET_IPV6_DAD_TRANSMITS=7,
NET_IPV6_RTR_SOLICITS=8,
NET_IPV6_RTR_SOLICIT_INTERVAL=9,
NET_IPV6_RTR_SOLICIT_DELAY=10,
NET_IPV6_USE_TEMPADDR=11,
NET_IPV6_TEMP_VALID_LFT=12,
NET_IPV6_TEMP_PREFERED_LFT=13,
NET_IPV6_REGEN_MAX_RETRY=14,
NET_IPV6_MAX_DESYNC_FACTOR=15,
NET_IPV6_MAX_ADDRESSES=16,
NET_IPV6_FORCE_MLD_VERSION=17,
NET_IPV6_ACCEPT_RA_DEFRTR=18,
NET_IPV6_ACCEPT_RA_PINFO=19,
NET_IPV6_ACCEPT_RA_RTR_PREF=20,
NET_IPV6_RTR_PROBE_INTERVAL=21,
NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN=22,
NET_IPV6_PROXY_NDP=23,
NET_IPV6_ACCEPT_SOURCE_ROUTE=25,
NET_IPV6_ACCEPT_RA_FROM_LOCAL=26,
NET_IPV6_ACCEPT_RA_RT_INFO_MIN_PLEN=27,
NET_IPV6_RA_DEFRTR_METRIC=28,
__NET_IPV6_MAX
};


enum {
NET_IPV6_ICMP_RATELIMIT = 1,
NET_IPV6_ICMP_ECHO_IGNORE_ALL = 2
};


enum {
NET_NEIGH_MCAST_SOLICIT=1,
NET_NEIGH_UCAST_SOLICIT=2,
NET_NEIGH_APP_SOLICIT=3,
NET_NEIGH_RETRANS_TIME=4,
NET_NEIGH_REACHABLE_TIME=5,
NET_NEIGH_DELAY_PROBE_TIME=6,
NET_NEIGH_GC_STALE_TIME=7,
NET_NEIGH_UNRES_QLEN=8,
NET_NEIGH_PROXY_QLEN=9,
NET_NEIGH_ANYCAST_DELAY=10,
NET_NEIGH_PROXY_DELAY=11,
NET_NEIGH_LOCKTIME=12,
NET_NEIGH_GC_INTERVAL=13,
NET_NEIGH_GC_THRESH1=14,
NET_NEIGH_GC_THRESH2=15,
NET_NEIGH_GC_THRESH3=16,
NET_NEIGH_RETRANS_TIME_MS=17,
NET_NEIGH_REACHABLE_TIME_MS=18,
};


enum {
NET_DCCP_DEFAULT=1,
};


enum {
NET_IPX_PPROP_BROADCASTING=1,
NET_IPX_FORWARDING=2
};


enum {
NET_LLC2=1,
NET_LLC_STATION=2,
};


enum {
NET_LLC2_TIMEOUT=1,
};


enum {
NET_LLC_STATION_ACK_TIMEOUT=1,
};


enum {
NET_LLC2_ACK_TIMEOUT=1,
NET_LLC2_P_TIMEOUT=2,
NET_LLC2_REJ_TIMEOUT=3,
NET_LLC2_BUSY_TIMEOUT=4,
};


enum {
NET_ATALK_AARP_EXPIRY_TIME=1,
NET_ATALK_AARP_TICK_TIME=2,
NET_ATALK_AARP_RETRANSMIT_LIMIT=3,
NET_ATALK_AARP_RESOLVE_TIME=4
};



enum {
NET_NETROM_DEFAULT_PATH_QUALITY=1,
NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER=2,
NET_NETROM_NETWORK_TTL_INITIALISER=3,
NET_NETROM_TRANSPORT_TIMEOUT=4,
NET_NETROM_TRANSPORT_MAXIMUM_TRIES=5,
NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY=6,
NET_NETROM_TRANSPORT_BUSY_DELAY=7,
NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE=8,
NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT=9,
NET_NETROM_ROUTING_CONTROL=10,
NET_NETROM_LINK_FAILS_COUNT=11,
NET_NETROM_RESET=12
};


enum {
NET_AX25_IP_DEFAULT_MODE=1,
NET_AX25_DEFAULT_MODE=2,
NET_AX25_BACKOFF_TYPE=3,
NET_AX25_CONNECT_MODE=4,
NET_AX25_STANDARD_WINDOW=5,
NET_AX25_EXTENDED_WINDOW=6,
NET_AX25_T1_TIMEOUT=7,
NET_AX25_T2_TIMEOUT=8,
NET_AX25_T3_TIMEOUT=9,
NET_AX25_IDLE_TIMEOUT=10,
NET_AX25_N2=11,
NET_AX25_PACLEN=12,
NET_AX25_PROTOCOL=13,
NET_AX25_DAMA_SLAVE_TIMEOUT=14
};


enum {
NET_ROSE_RESTART_REQUEST_TIMEOUT=1,
NET_ROSE_CALL_REQUEST_TIMEOUT=2,
NET_ROSE_RESET_REQUEST_TIMEOUT=3,
NET_ROSE_CLEAR_REQUEST_TIMEOUT=4,
NET_ROSE_ACK_HOLD_BACK_TIMEOUT=5,
NET_ROSE_ROUTING_CONTROL=6,
NET_ROSE_LINK_FAIL_TIMEOUT=7,
NET_ROSE_MAX_VCS=8,
NET_ROSE_WINDOW_SIZE=9,
NET_ROSE_NO_ACTIVITY_TIMEOUT=10
};


enum {
NET_X25_RESTART_REQUEST_TIMEOUT=1,
NET_X25_CALL_REQUEST_TIMEOUT=2,
NET_X25_RESET_REQUEST_TIMEOUT=3,
NET_X25_CLEAR_REQUEST_TIMEOUT=4,
NET_X25_ACK_HOLD_BACK_TIMEOUT=5,
NET_X25_FORWARD=6
};


enum
{
NET_TR_RIF_TIMEOUT=1
};


enum {
NET_DECNET_NODE_TYPE = 1,
NET_DECNET_NODE_ADDRESS = 2,
NET_DECNET_NODE_NAME = 3,
NET_DECNET_DEFAULT_DEVICE = 4,
NET_DECNET_TIME_WAIT = 5,
NET_DECNET_DN_COUNT = 6,
NET_DECNET_DI_COUNT = 7,
NET_DECNET_DR_COUNT = 8,
NET_DECNET_DST_GC_INTERVAL = 9,
NET_DECNET_CONF = 10,
NET_DECNET_NO_FC_MAX_CWND = 11,
NET_DECNET_MEM = 12,
NET_DECNET_RMEM = 13,
NET_DECNET_WMEM = 14,
NET_DECNET_DEBUG_LEVEL = 255
};


enum {
NET_DECNET_CONF_LOOPBACK = -2,
NET_DECNET_CONF_DDCMP = -3,
NET_DECNET_CONF_PPP = -4,
NET_DECNET_CONF_X25 = -5,
NET_DECNET_CONF_GRE = -6,
NET_DECNET_CONF_ETHER = -7


};


enum {
NET_DECNET_CONF_DEV_PRIORITY = 1,
NET_DECNET_CONF_DEV_T1 = 2,
NET_DECNET_CONF_DEV_T2 = 3,
NET_DECNET_CONF_DEV_T3 = 4,
NET_DECNET_CONF_DEV_FORWARDING = 5,
NET_DECNET_CONF_DEV_BLKSIZE = 6,
NET_DECNET_CONF_DEV_STATE = 7
};


enum {
NET_SCTP_RTO_INITIAL = 1,
NET_SCTP_RTO_MIN = 2,
NET_SCTP_RTO_MAX = 3,
NET_SCTP_RTO_ALPHA = 4,
NET_SCTP_RTO_BETA = 5,
NET_SCTP_VALID_COOKIE_LIFE = 6,
NET_SCTP_ASSOCIATION_MAX_RETRANS = 7,
NET_SCTP_PATH_MAX_RETRANS = 8,
NET_SCTP_MAX_INIT_RETRANSMITS = 9,
NET_SCTP_HB_INTERVAL = 10,
NET_SCTP_PRESERVE_ENABLE = 11,
NET_SCTP_MAX_BURST = 12,
NET_SCTP_ADDIP_ENABLE = 13,
NET_SCTP_PRSCTP_ENABLE = 14,
NET_SCTP_SNDBUF_POLICY = 15,
NET_SCTP_SACK_TIMEOUT = 16,
NET_SCTP_RCVBUF_POLICY = 17,
};


enum {
NET_BRIDGE_NF_CALL_ARPTABLES = 1,
NET_BRIDGE_NF_CALL_IPTABLES = 2,
NET_BRIDGE_NF_CALL_IP6TABLES = 3,
NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4,
NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5,
};



enum
{
FS_NRINODE=1,
FS_STATINODE=2,
FS_MAXINODE=3,
FS_NRDQUOT=4,
FS_MAXDQUOT=5,
FS_NRFILE=6,
FS_MAXFILE=7,
FS_DENTRY=8,
FS_NRSUPER=9,
FS_MAXSUPER=10,
FS_OVERFLOWUID=11,
FS_OVERFLOWGID=12,
FS_LEASES=13,
FS_DIR_NOTIFY=14,
FS_LEASE_TIME=15,
FS_DQSTATS=16,
FS_XFS=17,
FS_AIO_NR=18,
FS_AIO_MAX_NR=19,
FS_INOTIFY=20,
FS_OCFS2=988,
};


enum {
FS_DQ_LOOKUPS = 1,
FS_DQ_DROPS = 2,
FS_DQ_READS = 3,
FS_DQ_WRITES = 4,
FS_DQ_CACHE_HITS = 5,
FS_DQ_ALLOCATED = 6,
FS_DQ_FREE = 7,
FS_DQ_SYNCS = 8,
FS_DQ_WARNINGS = 9,
};




enum {
DEV_CDROM=1,
DEV_HWMON=2,
DEV_PARPORT=3,
DEV_RAID=4,
DEV_MAC_HID=5,
DEV_SCSI=6,
DEV_IPMI=7,
};


enum {
DEV_CDROM_INFO=1,
DEV_CDROM_AUTOCLOSE=2,
DEV_CDROM_AUTOEJECT=3,
DEV_CDROM_DEBUG=4,
DEV_CDROM_LOCK=5,
DEV_CDROM_CHECK_MEDIA=6
};


enum {
DEV_PARPORT_DEFAULT=-3
};


enum {
DEV_RAID_SPEED_LIMIT_MIN=1,
DEV_RAID_SPEED_LIMIT_MAX=2
};


enum {
DEV_PARPORT_DEFAULT_TIMESLICE=1,
DEV_PARPORT_DEFAULT_SPINTIME=2
};


enum {
DEV_PARPORT_SPINTIME=1,
DEV_PARPORT_BASE_ADDR=2,
DEV_PARPORT_IRQ=3,
DEV_PARPORT_DMA=4,
DEV_PARPORT_MODES=5,
DEV_PARPORT_DEVICES=6,
DEV_PARPORT_AUTOPROBE=16
};


enum {
DEV_PARPORT_DEVICES_ACTIVE=-3,
};


enum {
DEV_PARPORT_DEVICE_TIMESLICE=1,
};


enum {
DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES=1,
DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES=2,
DEV_MAC_HID_MOUSE_BUTTON_EMULATION=3,
DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE=4,
DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE=5,
DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES=6
};


enum {
DEV_SCSI_LOGGING_LEVEL=1,
};


enum {
DEV_IPMI_POWEROFF_POWERCYCLE=1,
};


enum
{
ABI_DEFHANDLER_COFF=1,
ABI_DEFHANDLER_ELF=2,
ABI_DEFHANDLER_LCALL7=3,
ABI_DEFHANDLER_LIBCSO=4,
ABI_TRACE=5,
ABI_FAKE_UTSNAME=6,
};
# 31 "./include/linux/sysctl.h" 2


struct completion;
struct ctl_table;
struct nsproxy;
struct ctl_table_root;
struct ctl_table_header;
struct ctl_dir;
# 55 "./include/linux/sysctl.h"
extern const int sysctl_vals[];





extern const unsigned long sysctl_long_vals[];

typedef int proc_handler(struct ctl_table *ctl, int write, void *buffer,
size_t *lenp, loff_t *ppos);

int proc_dostring(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_dobool(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
int proc_dointvec(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_douintvec(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_dointvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_douintvec_minmax(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
int proc_dou8vec_minmax(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
int proc_dointvec_jiffies(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_dointvec_userhz_jiffies(struct ctl_table *, int, void *, size_t *,
loff_t *);
int proc_dointvec_ms_jiffies(struct ctl_table *, int, void *, size_t *,
loff_t *);
int proc_doulongvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, void *,
size_t *, loff_t *);
int proc_do_large_bitmap(struct ctl_table *, int, void *, size_t *, loff_t *);
int proc_do_static_key(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
# 114 "./include/linux/sysctl.h"
struct ctl_table_poll {
atomic_t event;
wait_queue_head_t wait;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *proc_sys_poll_event(struct ctl_table_poll *poll)
{
return (void *)(unsigned long)atomic_read(&poll->event);
}
# 132 "./include/linux/sysctl.h"
struct ctl_table {
const char *procname;
void *data;
int maxlen;
umode_t mode;
struct ctl_table *child;
proc_handler *proc_handler;
struct ctl_table_poll *poll;
void *extra1;
void *extra2;
} ;
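/*
 * struct ctl_table describes one /proc/sys entry: procname is the file name,
 * data/maxlen describe the backing variable, and proc_handler converts
 * between its binary value and the text read from or written to the file,
 * with extra1/extra2 typically holding min/max bounds.
 */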

struct ctl_node {
struct rb_node node;
struct ctl_table_header *header;
};



struct ctl_table_header {
union {
struct {
struct ctl_table *ctl_table;
int used;
int count;
int nreg;
};
struct callback_head rcu;
};
struct completion *unregistering;
struct ctl_table *ctl_table_arg;
struct ctl_table_root *root;
struct ctl_table_set *set;
struct ctl_dir *parent;
struct ctl_node *node;
struct hlist_head inodes;
};

struct ctl_dir {

struct ctl_table_header header;
struct rb_root root;
};

struct ctl_table_set {
int (*is_seen)(struct ctl_table_set *);
struct ctl_dir dir;
};

struct ctl_table_root {
struct ctl_table_set default_set;
struct ctl_table_set *(*lookup)(struct ctl_table_root *root);
void (*set_ownership)(struct ctl_table_header *head,
struct ctl_table *table,
kuid_t *uid, kgid_t *gid);
int (*permissions)(struct ctl_table_header *head, struct ctl_table *table);
};


struct ctl_path {
const char *procname;
};
# 207 "./include/linux/sysctl.h"
extern int __register_sysctl_base(struct ctl_table *base_table);



void proc_sys_poll_notify(struct ctl_table_poll *poll);

extern void setup_sysctl_set(struct ctl_table_set *p,
struct ctl_table_root *root,
int (*is_seen)(struct ctl_table_set *));
extern void retire_sysctl_set(struct ctl_table_set *set);

struct ctl_table_header *__register_sysctl_table(
struct ctl_table_set *set,
const char *path, struct ctl_table *table);
struct ctl_table_header *__register_sysctl_paths(
struct ctl_table_set *set,
const struct ctl_path *path, struct ctl_table *table);
struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table);
struct ctl_table_header *register_sysctl_table(struct ctl_table * table);
struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
struct ctl_table *table);

void unregister_sysctl_table(struct ctl_table_header * table);

extern int sysctl_init_bases(void);
extern void __register_sysctl_init(const char *path, struct ctl_table *table,
const char *table_name);

extern struct ctl_table_header *register_sysctl_mount_point(const char *path);

void do_sysctl_args(void);
int do_proc_douintvec(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos,
int (*conv)(unsigned long *lvalp,
unsigned int *valp,
int write, void *data),
void *data);

extern int pwrsw_enabled;
extern int unaligned_enabled;
extern int unaligned_dump_stack;
extern int no_unaligned_warning;

extern struct ctl_table sysctl_mount_point[];
# 299 "./include/linux/sysctl.h"
int sysctl_max_threads(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
# 18 "./include/linux/key.h" 2


# 1 "./include/linux/assoc_array.h" 1
# 22 "./include/linux/assoc_array.h"
struct assoc_array {
struct assoc_array_ptr *root;
unsigned long nr_leaves_on_tree;
};




struct assoc_array_ops {

unsigned long (*get_key_chunk)(const void *index_key, int level);


unsigned long (*get_object_key_chunk)(const void *object, int level);


bool (*compare_object)(const void *object, const void *index_key);




int (*diff_objects)(const void *object, const void *index_key);


void (*free_object)(void *object);
};




struct assoc_array_edit;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void assoc_array_init(struct assoc_array *array)
{
array->root = ((void *)0);
array->nr_leaves_on_tree = 0;
}

extern int assoc_array_iterate(const struct assoc_array *array,
int (*iterator)(const void *object,
void *iterator_data),
void *iterator_data);
extern void *assoc_array_find(const struct assoc_array *array,
const struct assoc_array_ops *ops,
const void *index_key);
extern void assoc_array_destroy(struct assoc_array *array,
const struct assoc_array_ops *ops);
extern struct assoc_array_edit *assoc_array_insert(struct assoc_array *array,
const struct assoc_array_ops *ops,
const void *index_key,
void *object);
extern void assoc_array_insert_set_object(struct assoc_array_edit *edit,
void *object);
extern struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
const struct assoc_array_ops *ops,
const void *index_key);
extern struct assoc_array_edit *assoc_array_clear(struct assoc_array *array,
const struct assoc_array_ops *ops);
extern void assoc_array_apply_edit(struct assoc_array_edit *edit);
extern void assoc_array_cancel_edit(struct assoc_array_edit *edit);
extern int assoc_array_gc(struct assoc_array *array,
const struct assoc_array_ops *ops,
bool (*iterator)(void *object, void *iterator_data),
void *iterator_data);
# 21 "./include/linux/key.h" 2







typedef int32_t key_serial_t;


typedef uint32_t key_perm_t;

struct key;
struct net;
# 77 "./include/linux/key.h"
enum key_need_perm {
KEY_NEED_UNSPECIFIED,
KEY_NEED_VIEW,
KEY_NEED_READ,
KEY_NEED_WRITE,
KEY_NEED_SEARCH,
KEY_NEED_LINK,
KEY_NEED_SETATTR,
KEY_NEED_UNLINK,
KEY_SYSADMIN_OVERRIDE,
KEY_AUTHTOKEN_OVERRIDE,
KEY_DEFER_PERM_CHECK,
};

struct seq_file;
struct user_struct;
struct signal_struct;
struct cred;

struct key_type;
struct key_owner;
struct key_tag;
struct keyring_list;
struct keyring_name;

struct key_tag {
struct callback_head rcu;
refcount_t usage;
bool removed;
};

struct keyring_index_key {

unsigned long hash;
union {
struct {

u16 desc_len;
char desc[sizeof(long) - 2];




};
unsigned long x;
};
struct key_type *type;
struct key_tag *domain_tag;
const char *description;
};

union key_payload {
void *rcu_data0;
void *data[4];
};
# 147 "./include/linux/key.h"
typedef struct __key_reference_with_attributes *key_ref_t;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) key_ref_t make_key_ref(const struct key *key,
bool possession)
{
return (key_ref_t) ((unsigned long) key | possession);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct key *key_ref_to_ptr(const key_ref_t key_ref)
{
return (struct key *) ((unsigned long) key_ref & ~1UL);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_key_possessed(const key_ref_t key_ref)
{
return (unsigned long) key_ref & 1UL;
}
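/*
 * key_ref_t packs a "possessed" flag into bit 0 of a struct key pointer,
 * which is why make_key_ref() ORs the boolean in and key_ref_to_ptr() masks
 * it back out with ~1UL.
 */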

typedef int (*key_restrict_link_func_t)(struct key *dest_keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *restriction_key);

struct key_restriction {
key_restrict_link_func_t check;
struct key *key;
struct key_type *keytype;
};

enum key_state {
KEY_IS_UNINSTANTIATED,
KEY_IS_POSITIVE,
};
# 189 "./include/linux/key.h"
struct key {
refcount_t usage;
key_serial_t serial;
union {
struct list_head graveyard_link;
struct rb_node serial_node;
};



struct rw_semaphore sem;
struct key_user *user;
void *security;
union {
time64_t expiry;
time64_t revoked_at;
};
time64_t last_used_at;
kuid_t uid;
kgid_t gid;
key_perm_t perm;
unsigned short quotalen;
unsigned short datalen;



short state;






unsigned long flags;
# 239 "./include/linux/key.h"
union {
struct keyring_index_key index_key;
struct {
unsigned long hash;
unsigned long len_desc;
struct key_type *type;
struct key_tag *domain_tag;
char *description;
};
};





union {
union key_payload payload;
struct {

struct list_head name_link;
struct assoc_array keys;
};
};
# 274 "./include/linux/key.h"
struct key_restriction *restrict_link;
};

extern struct key *key_alloc(struct key_type *type,
const char *desc,
kuid_t uid, kgid_t gid,
const struct cred *cred,
key_perm_t perm,
unsigned long flags,
struct key_restriction *restrict_link);
# 294 "./include/linux/key.h"
extern void key_revoke(struct key *key);
extern void key_invalidate(struct key *key);
extern void key_put(struct key *key);
extern bool key_put_tag(struct key_tag *tag);
extern void key_remove_domain(struct key_tag *domain_tag);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct key *__key_get(struct key *key)
{
refcount_inc(&key->usage);
return key;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct key *key_get(struct key *key)
{
return key ? __key_get(key) : key;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void key_ref_put(key_ref_t key_ref)
{
key_put(key_ref_to_ptr(key_ref));
}

extern struct key *request_key_tag(struct key_type *type,
const char *description,
struct key_tag *domain_tag,
const char *callout_info);

extern struct key *request_key_rcu(struct key_type *type,
const char *description,
struct key_tag *domain_tag);

extern struct key *request_key_with_auxdata(struct key_type *type,
const char *description,
struct key_tag *domain_tag,
const void *callout_info,
size_t callout_len,
void *aux);
# 340 "./include/linux/key.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct key *request_key(struct key_type *type,
const char *description,
const char *callout_info)
{
return request_key_tag(type, description, ((void *)0), callout_info);
}
# 379 "./include/linux/key.h"
extern int wait_for_key_construction(struct key *key, bool intr);

extern int key_validate(const struct key *key);

extern key_ref_t key_create_or_update(key_ref_t keyring,
const char *type,
const char *description,
const void *payload,
size_t plen,
key_perm_t perm,
unsigned long flags);

extern int key_update(key_ref_t key,
const void *payload,
size_t plen);

extern int key_link(struct key *keyring,
struct key *key);

extern int key_move(struct key *key,
struct key *from_keyring,
struct key *to_keyring,
unsigned int flags);

extern int key_unlink(struct key *keyring,
struct key *key);

extern struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
const struct cred *cred,
key_perm_t perm,
unsigned long flags,
struct key_restriction *restrict_link,
struct key *dest);

extern int restrict_link_reject(struct key *keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *restriction_key);

extern int keyring_clear(struct key *keyring);

extern key_ref_t keyring_search(key_ref_t keyring,
struct key_type *type,
const char *description,
bool recurse);

extern int keyring_add_key(struct key *keyring,
struct key *key);

extern int keyring_restrict(key_ref_t keyring, const char *type,
const char *restriction);

extern struct key *key_lookup(key_serial_t id);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) key_serial_t key_serial(const struct key *key)
{
return key ? key->serial : 0;
}

extern void key_set_timeout(struct key *, unsigned);

extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
enum key_need_perm need_perm);
extern void key_free_user_ns(struct user_namespace *);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) short key_read_state(const struct key *key)
{

return ({ typeof(*&key->state) ___p1 = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_182(void) ; if (!((sizeof(*&key->state) == sizeof(char) || sizeof(*&key->state) == sizeof(short) || sizeof(*&key->state) == sizeof(int) || sizeof(*&key->state) == sizeof(long)) || sizeof(*&key->state) == sizeof(long long))) __compiletime_assert_182(); } while (0); (*(const volatile typeof( _Generic((*&key->state), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*&key->state))) *)&(*&key->state)); }); do { __attribute__((__noreturn__)) extern void __compiletime_assert_183(void) ; if (!((sizeof(*&key->state) == sizeof(char) || sizeof(*&key->state) == sizeof(short) || sizeof(*&key->state) == sizeof(int) || sizeof(*&key->state) == sizeof(long)))) __compiletime_assert_183(); } while (0); __asm__ __volatile__ ("fence " "r" "," "rw" : : : "memory"); ___p1; });
}
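/*
 * The one-line expression above is the expansion of
 * smp_load_acquire(&key->state): a READ_ONCE() (the _Generic/volatile cast)
 * guarded by compile-time size assertions, followed by RISC-V's acquire
 * barrier "fence r,rw".
 */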
# 457 "./include/linux/key.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool key_is_positive(const struct key *key)
{
return key_read_state(key) == KEY_IS_POSITIVE;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool key_is_negative(const struct key *key)
{
return key_read_state(key) < 0;
}
# 480 "./include/linux/key.h"
extern struct ctl_table key_sysctls[];




extern int install_thread_keyring_to_cred(struct cred *cred);
extern void key_fsuid_changed(struct cred *new_cred);
extern void key_fsgid_changed(struct cred *new_cred);
extern void key_init(void);
# 14 "./include/linux/cred.h" 2



# 1 "./include/linux/sched/user.h" 1






# 1 "./include/linux/percpu_counter.h" 1
# 20 "./include/linux/percpu_counter.h"
struct percpu_counter {
raw_spinlock_t lock;
s64 count;

struct list_head list;

s32 *counters;
};

extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
struct lock_class_key *key);
# 41 "./include/linux/percpu_counter.h"
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
s64 ret = __percpu_counter_sum(fbc);
return ret < 0 ? 0 : ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s64 percpu_counter_sum(struct percpu_counter *fbc)
{
return __percpu_counter_sum(fbc);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s64 percpu_counter_read(struct percpu_counter *fbc)
{
return fbc->count;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{

s64 ret = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_184(void) ; if (!((sizeof(fbc->count) == sizeof(char) || sizeof(fbc->count) == sizeof(short) || sizeof(fbc->count) == sizeof(int) || sizeof(fbc->count) == sizeof(long)) || sizeof(fbc->count) == sizeof(long long))) __compiletime_assert_184(); } while (0); (*(const volatile typeof( _Generic((fbc->count), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (fbc->count))) *)&(fbc->count)); });

if (ret >= 0)
return ret;
return 0;
}
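/*
 * The _Generic/volatile expression above is the expansion of
 * READ_ONCE(fbc->count), ensuring the unlocked read of the 64-bit counter
 * is performed once and not refetched by the compiler.
 */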

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool percpu_counter_initialized(struct percpu_counter *fbc)
{
return (fbc->counters != ((void *)0));
}
# 181 "./include/linux/percpu_counter.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void percpu_counter_inc(struct percpu_counter *fbc)
{
percpu_counter_add(fbc, 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void percpu_counter_dec(struct percpu_counter *fbc)
{
percpu_counter_add(fbc, -1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
percpu_counter_add(fbc, -amount);
}
# 8 "./include/linux/sched/user.h" 2

# 1 "./include/linux/ratelimit.h" 1








static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ratelimit_state_init(struct ratelimit_state *rs,
int interval, int burst)
{
memset(rs, 0, sizeof(*rs));

do { static struct lock_class_key __key; __raw_spin_lock_init((&rs->lock), "&rs->lock", &__key, LD_WAIT_SPIN); } while (0);
rs->interval = interval;
rs->burst = burst;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ratelimit_default_init(struct ratelimit_state *rs)
{
return ratelimit_state_init(rs, (5 * 100),
10);
}
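/*
 * ratelimit_default_init(): (5 * 100) is the expansion of
 * DEFAULT_RATELIMIT_INTERVAL, i.e. 5 * HZ with CONFIG_HZ=100, and 10 is
 * DEFAULT_RATELIMIT_BURST.  The do { static struct lock_class_key ... }
 * block in ratelimit_state_init() above is raw_spin_lock_init() carrying its
 * lockdep class key.
 */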

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ratelimit_state_exit(struct ratelimit_state *rs)
{
if (!(rs->flags & ((((1UL))) << (0))))
return;

if (rs->missed) {
({ do {} while (0); _printk("\001" "4" "IPv6: " "%s: %d output lines suppressed due to ratelimiting\n", get_current()->comm, rs->missed); });

rs->missed = 0;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
ratelimit_set_flags(struct ratelimit_state *rs, unsigned long flags)
{
rs->flags = flags;
}

extern struct ratelimit_state printk_ratelimit_state;
# 10 "./include/linux/sched/user.h" 2




struct user_struct {
refcount_t __count;

struct percpu_counter epoll_watches;

unsigned long unix_inflight;
atomic_long_t pipe_bufs;


struct hlist_node uidhash_node;
kuid_t uid;



atomic_long_t locked_vm;






struct ratelimit_state ratelimit;
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(kuid_t);

extern struct user_struct root_user;




extern struct user_struct * alloc_uid(kuid_t);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct user_struct *get_uid(struct user_struct *u)
{
refcount_inc(&u->__count);
return u;
}
extern void free_uid(struct user_struct *);
# 18 "./include/linux/cred.h" 2

struct cred;
struct inode;




struct group_info {
atomic_t usage;
int ngroups;
kgid_t gid[];
} ;
# 40 "./include/linux/cred.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct group_info *get_group_info(struct group_info *gi)
{
atomic_inc(&gi->usage);
return gi;
}
# 57 "./include/linux/cred.h"
extern struct group_info *groups_alloc(int);
extern void groups_free(struct group_info *);

extern int in_group_p(kgid_t);
extern int in_egroup_p(kgid_t);
extern int groups_search(const struct group_info *, kgid_t);

extern int set_current_groups(struct group_info *);
extern void set_groups(struct cred *, struct group_info *);
extern bool may_setgroups(void);
extern void groups_sort(struct group_info *);
# 110 "./include/linux/cred.h"
struct cred {
atomic_t usage;







kuid_t uid;
kgid_t gid;
kuid_t suid;
kgid_t sgid;
kuid_t euid;
kgid_t egid;
kuid_t fsuid;
kgid_t fsgid;
unsigned securebits;
kernel_cap_t cap_inheritable;
kernel_cap_t cap_permitted;
kernel_cap_t cap_effective;
kernel_cap_t cap_bset;
kernel_cap_t cap_ambient;

unsigned char jit_keyring;

struct key *session_keyring;
struct key *process_keyring;
struct key *thread_keyring;
struct key *request_key_auth;




struct user_struct *user;
struct user_namespace *user_ns;
struct ucounts *ucounts;
struct group_info *group_info;

union {
int non_rcu;
struct callback_head rcu;
};
} ;

extern void __put_cred(struct cred *);
extern void exit_creds(struct task_struct *);
extern int copy_creds(struct task_struct *, unsigned long);
extern const struct cred *get_task_cred(struct task_struct *);
extern struct cred *cred_alloc_blank(void);
extern struct cred *prepare_creds(void);
extern struct cred *prepare_exec_creds(void);
extern int commit_creds(struct cred *);
extern void abort_creds(struct cred *);
extern const struct cred *override_creds(const struct cred *);
extern void revert_creds(const struct cred *);
extern struct cred *prepare_kernel_cred(struct task_struct *);
extern int change_create_files_as(struct cred *, struct inode *);
extern int set_security_override(struct cred *, u32);
extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
extern int cred_fscmp(const struct cred *, const struct cred *);
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) cred_init(void);
extern int set_cred_ucounts(struct cred *);
# 204 "./include/linux/cred.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void validate_creds(const struct cred *cred)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void validate_creds_for_do_exit(struct task_struct *tsk)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void validate_process_creds(void)
{
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cap_ambient_invariant_ok(const struct cred *cred)
{
return cap_issubset(cred->cap_ambient,
cap_intersect(cred->cap_permitted,
cred->cap_inheritable));
}
# 229 "./include/linux/cred.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct cred *get_new_cred(struct cred *cred)
{
atomic_inc(&cred->usage);
return cred;
}
# 248 "./include/linux/cred.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const struct cred *get_cred(const struct cred *cred)
{
struct cred *nonconst_cred = (struct cred *) cred;
if (!cred)
return cred;
validate_creds(cred);
nonconst_cred->non_rcu = 0;
return get_new_cred(nonconst_cred);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const struct cred *get_cred_rcu(const struct cred *cred)
{
struct cred *nonconst_cred = (struct cred *) cred;
if (!cred)
return ((void *)0);
if (!atomic_inc_not_zero(&nonconst_cred->usage))
return ((void *)0);
validate_creds(cred);
nonconst_cred->non_rcu = 0;
return cred;
}
# 281 "./include/linux/cred.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_cred(const struct cred *_cred)
{
struct cred *cred = (struct cred *) _cred;

if (cred) {
validate_creds(cred);
if (atomic_dec_and_test(&(cred)->usage))
__put_cred(cred);
}
}
# 393 "./include/linux/cred.h"
extern struct user_namespace init_user_ns;
# 11 "./include/linux/sched/signal.h" 2
# 20 "./include/linux/sched/signal.h"
struct sighand_struct {
spinlock_t siglock;
refcount_t count;
wait_queue_head_t signalfd_wqh;
struct k_sigaction action[64];
};




struct pacct_struct {
int ac_flag;
long ac_exitcode;
unsigned long ac_mem;
u64 ac_utime, ac_stime;
unsigned long ac_minflt, ac_majflt;
};

struct cpu_itimer {
u64 expires;
u64 incr;
};





struct task_cputime_atomic {
atomic64_t utime;
atomic64_t stime;
atomic64_t sum_exec_runtime;
};
# 66 "./include/linux/sched/signal.h"
struct thread_group_cputimer {
struct task_cputime_atomic cputime_atomic;
};

struct multiprocess_signals {
sigset_t signal;
struct hlist_node node;
};

struct core_thread {
struct task_struct *task;
struct core_thread *next;
};

struct core_state {
atomic_t nr_threads;
struct core_thread dumper;
struct completion startup;
};
# 93 "./include/linux/sched/signal.h"
struct signal_struct {
refcount_t sigcnt;
atomic_t live;
int nr_threads;
struct list_head thread_head;

wait_queue_head_t wait_chldexit;


struct task_struct *curr_target;


struct sigpending shared_pending;


struct hlist_head multiprocess;


int group_exit_code;

int notify_count;
struct task_struct *group_exec_task;


int group_stop_count;
unsigned int flags;

struct core_state *core_state;
# 131 "./include/linux/sched/signal.h"
unsigned int is_child_subreaper:1;
unsigned int has_child_subreaper:1;




int posix_timer_id;
struct list_head posix_timers;


struct hrtimer real_timer;
ktime_t it_real_incr;






struct cpu_itimer it[2];





struct thread_group_cputimer cputimer;



struct posix_cputimers posix_cputimers;


struct pid *pids[PIDTYPE_MAX];





struct pid *tty_old_pgrp;


int leader;

struct tty_struct *tty;
# 184 "./include/linux/sched/signal.h"
seqlock_t stats_lock;
u64 utime, stime, cutime, cstime;
u64 gtime;
u64 cgtime;
struct prev_cputime prev_cputime;
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
unsigned long inblock, oublock, cinblock, coublock;
unsigned long maxrss, cmaxrss;
struct task_io_accounting ioac;







unsigned long long sum_sched_runtime;
# 212 "./include/linux/sched/signal.h"
struct rlimit rlim[16];
# 229 "./include/linux/sched/signal.h"
bool oom_flag_origin;
short oom_score_adj;
short oom_score_adj_min;

struct mm_struct *oom_mm;


struct mutex cred_guard_mutex;





struct rw_semaphore exec_update_lock;




} ;
# 267 "./include/linux/sched/signal.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void signal_set_stop_flags(struct signal_struct *sig,
unsigned int flags)
{
({ int __ret_warn_on = !!(sig->flags & 0x00000004); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/sched/signal.h"), "i" (270), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
sig->flags = (sig->flags & ~((0x00000010|0x00000020) | 0x00000001 | 0x00000002)) | flags;
}
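/*
 * The ({ int __ret_warn_on ... }) block with the "ebreak" asm and __bug_table
 * entry is RISC-V's expansion of WARN_ON(); 0x00000004 appears to be
 * SIGNAL_GROUP_EXIT, and the cleared mask covers the SIGNAL_CLD_* and
 * SIGNAL_STOP_* flags before the new stop flags are set.
 */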

extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *task, sigset_t *mask,
kernel_siginfo_t *info, enum pid_type *type);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int kernel_dequeue_signal(void)
{
struct task_struct *task = get_current();
kernel_siginfo_t __info;
enum pid_type __type;
int ret;

spin_lock_irq(&task->sighand->siglock);
ret = dequeue_signal(task, &task->blocked, &__info, &__type);
spin_unlock_irq(&task->sighand->siglock);

return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kernel_signal_stop(void)
{
spin_lock_irq(&get_current()->sighand->siglock);
if (get_current()->jobctl & (1UL << 16))
do { unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(&get_current()->pi_lock); } while (0); do { ({ int __ret_warn_on = !!(!((((0x0100 | 0x0004))) & (0x0004 | 0x0008 | 0x0040 | 0x0080))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/sched/signal.h"), "i" (298), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); get_current()->task_state_change = ({ __label__ __here; __here: (unsigned long)&&__here; }); } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_185(void) ; if (!((sizeof(get_current()->__state) == sizeof(char) || sizeof(get_current()->__state) == sizeof(short) || sizeof(get_current()->__state) == sizeof(int) || sizeof(get_current()->__state) == sizeof(long)) || sizeof(get_current()->__state) == sizeof(long long))) __compiletime_assert_185(); } while (0); do { *(volatile typeof(get_current()->__state) *)&(get_current()->__state) = (((0x0100 | 0x0004))); } while (0); } while (0); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _raw_spin_unlock_irqrestore(&get_current()->pi_lock, flags); } while (0); } while (0);
spin_unlock_irq(&get_current()->sighand->siglock);

schedule();
}
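/*
 * kernel_signal_stop(): (1UL << 16) is JOBCTL_STOP_DEQUEUED, and the large
 * do/while above it is the expansion of set_special_state(TASK_STOPPED)
 * (0x0100 | 0x0004 == TASK_WAKEKILL | __TASK_STOPPED), which takes pi_lock
 * and writes ->__state via WRITE_ONCE under lockdep bookkeeping.
 */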






int force_sig_fault_to_task(int sig, int code, void *addr, struct task_struct *t);
int force_sig_fault(int sig, int code, void *addr);
int send_sig_fault(int sig, int code, void *addr, struct task_struct *t);

int force_sig_mceerr(int code, void *, short);
int send_sig_mceerr(int code, void *, short, struct task_struct *);

int force_sig_bnderr(void *addr, void *lower, void *upper);
int force_sig_pkuerr(void *addr, u32 pkey);
int force_sig_perf(void *addr, u32 type, u64 sig_data);

int force_sig_ptrace_errno_trap(int errno, void *addr);
int force_sig_fault_trapno(int sig, int code, void *addr, int trapno);
int send_sig_fault_trapno(int sig, int code, void *addr, int trapno,
struct task_struct *t);
int force_sig_seccomp(int syscall, int reason, bool force_coredump);

extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig);
extern int force_sig_info(struct kernel_siginfo *);
extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __attribute__((__warn_unused_result__)) bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
extern void force_fatal_sig(int);
extern void force_exit_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
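/*
 * [Editor's note] In the thread-flag helpers that follow, the bare bit
 * numbers line up with the RISC-V TIF values: 2 is TIF_SIGPENDING and 9 is
 * TIF_NOTIFY_SIGNAL. The 9 passed to sigismember() in
 * __fatal_signal_pending() is unrelated: that one is SIGKILL.
 */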

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_notify_signal(void)
{
clear_ti_thread_flag(((struct thread_info *)get_current()), 9);
do { do { } while (0); __asm__ __volatile__ ("fence " "rw" "," "rw" : : : "memory"); } while (0);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_notify_signal(struct task_struct *task)
{
if (!test_and_set_tsk_thread_flag(task, 9) &&
!wake_up_state(task, 0x0001))
kick_process(task);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int restart_syscall(void)
{
set_tsk_thread_flag(get_current(), 2);
return -513;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int task_sigpending(struct task_struct *p)
{
return __builtin_expect(!!(test_tsk_thread_flag(p,2)), 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int signal_pending(struct task_struct *p)
{





if (__builtin_expect(!!(test_tsk_thread_flag(p, 9)), 0))
return 1;
return task_sigpending(p);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __fatal_signal_pending(struct task_struct *p)
{
return __builtin_expect(!!(sigismember(&p->pending.signal, 9)), 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int fatal_signal_pending(struct task_struct *p)
{
return task_sigpending(p) && __fatal_signal_pending(p);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int signal_pending_state(unsigned int state, struct task_struct *p)
{
if (!(state & (0x0001 | 0x0100)))
return 0;
if (!signal_pending(p))
return 0;

return (state & 0x0001) || __fatal_signal_pending(p);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fault_signal_pending(vm_fault_t fault_flags,
struct pt_regs *regs)
{
return __builtin_expect(!!((fault_flags & VM_FAULT_RETRY) && (fatal_signal_pending(get_current()) || ((((regs)->status & (0x00000100UL)) == 0) && signal_pending(get_current())))), 0);


}
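/*
 * [Editor's note] The ((regs)->status & 0x00000100UL) == 0 test above is
 * the RISC-V expansion of user_mode(regs): SR_SPP (bit 8 of sstatus)
 * clear means the fault was taken from user mode.
 */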







extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void signal_wake_up(struct task_struct *t, bool resume)
{
signal_wake_up_state(t, resume ? 0x0100 : 0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
signal_wake_up_state(t, resume ? 0x0008 : 0);
}

void task_join_group_stop(struct task_struct *task);
# 466 "./include/linux/sched/signal.h"
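/*
 * [Editor's note] Bit 4 in the restore-sigmask helpers below should be
 * TIF_RESTORE_SIGMASK in the RISC-V thread_info flags.
 */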
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_restore_sigmask(void)
{
set_ti_thread_flag(((struct thread_info *)get_current()), 4);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_tsk_restore_sigmask(struct task_struct *task)
{
clear_tsk_thread_flag(task, 4);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_restore_sigmask(void)
{
clear_ti_thread_flag(((struct thread_info *)get_current()), 4);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool test_tsk_restore_sigmask(struct task_struct *task)
{
return test_tsk_thread_flag(task, 4);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool test_restore_sigmask(void)
{
return test_ti_thread_flag(((struct thread_info *)get_current()), 4);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool test_and_clear_restore_sigmask(void)
{
return test_and_clear_ti_thread_flag(((struct thread_info *)get_current()), 4);
}
# 525 "./include/linux/sched/signal.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void restore_saved_sigmask(void)
{
if (test_and_clear_restore_sigmask())
__set_current_blocked(&get_current()->saved_sigmask);
}

extern int set_user_sigmask(const sigset_t *umask, size_t sigsetsize);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void restore_saved_sigmask_unless(bool interrupted)
{
if (interrupted)
({ int __ret_warn_on = !!(!signal_pending(get_current())); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/sched/signal.h"), "i" (536), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
else
restore_saved_sigmask();
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) sigset_t *sigmask_to_save(void)
{
sigset_t *res = &get_current()->blocked;
if (__builtin_expect(!!(test_restore_sigmask()), 0))
res = &get_current()->saved_sigmask;
return res;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int kill_cad_pid(int sig, int priv)
{
return kill_pid(cad_pid, sig, priv);
}
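/*
 * [Editor's note] Sigaltstack helpers follow; the magic numbers match the
 * uapi constants (SS_ONSTACK == 1, SS_DISABLE == 2, SS_AUTODISARM ==
 * 1U << 31), and 0x08000000 in sigsp() is SA_ONSTACK.
 */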





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __on_sig_stack(unsigned long sp)
{




return sp > get_current()->sas_ss_sp &&
sp - get_current()->sas_ss_sp <= get_current()->sas_ss_size;

}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int on_sig_stack(unsigned long sp)
{
# 583 "./include/linux/sched/signal.h"
if (get_current()->sas_ss_flags & (1U << 31))
return 0;

return __on_sig_stack(sp);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sas_ss_flags(unsigned long sp)
{
if (!get_current()->sas_ss_size)
return 2;

return on_sig_stack(sp) ? 1 : 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sas_ss_reset(struct task_struct *p)
{
p->sas_ss_sp = 0;
p->sas_ss_size = 0;
p->sas_ss_flags = 2;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
if (__builtin_expect(!!((ksig->ka.sa.sa_flags & 0x08000000)), 0) && ! sas_ss_flags(sp))



return get_current()->sas_ss_sp + get_current()->sas_ss_size;

return sp;
}

extern void __cleanup_sighand(struct sighand_struct *);
extern void flush_itimer_signals(void);
# 627 "./include/linux/sched/signal.h"
extern bool current_is_single_threaded(void);
# 649 "./include/linux/sched/signal.h"
typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
struct pid *pid;
if (type == PIDTYPE_PID)
pid = task_pid(task);
else
pid = task->signal->pids[type];
return pid;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct pid *task_tgid(struct task_struct *task)
{
return task->signal->pids[PIDTYPE_TGID];
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct pid *task_pgrp(struct task_struct *task)
{
return task->signal->pids[PIDTYPE_PGID];
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct pid *task_session(struct task_struct *task)
{
return task->signal->pids[PIDTYPE_SID];
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int get_nr_threads(struct task_struct *task)
{
return task->signal->nr_threads;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool thread_group_leader(struct task_struct *p)
{
return p->exit_signal >= 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
return p1->signal == p2->signal;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct task_struct *next_thread(const struct task_struct *p)
{
return ({ void *__mptr = (void *)(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_186(void) ; if (!((sizeof(p->thread_group.next) == sizeof(char) || sizeof(p->thread_group.next) == sizeof(short) || sizeof(p->thread_group.next) == sizeof(int) || sizeof(p->thread_group.next) == sizeof(long)) || sizeof(p->thread_group.next) == sizeof(long long))) __compiletime_assert_186(); } while (0); (*(const volatile typeof( _Generic((p->thread_group.next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (p->thread_group.next))) *)&(p->thread_group.next)); })); _Static_assert(__builtin_types_compatible_p(typeof(*(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_186(void) ; if (!((sizeof(p->thread_group.next) == sizeof(char) || sizeof(p->thread_group.next) == sizeof(short) || sizeof(p->thread_group.next) == sizeof(int) || sizeof(p->thread_group.next) == sizeof(long)) || sizeof(p->thread_group.next) == sizeof(long long))) __compiletime_assert_186(); } while (0); (*(const volatile typeof( _Generic((p->thread_group.next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (p->thread_group.next))) *)&(p->thread_group.next)); }))), typeof(((struct task_struct *)0)->thread_group)) || __builtin_types_compatible_p(typeof(*(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_186(void) ; if (!((sizeof(p->thread_group.next) == sizeof(char) || sizeof(p->thread_group.next) == sizeof(short) || sizeof(p->thread_group.next) == sizeof(int) || sizeof(p->thread_group.next) == sizeof(long)) || sizeof(p->thread_group.next) == sizeof(long long))) __compiletime_assert_186(); } while (0); (*(const volatile typeof( _Generic((p->thread_group.next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (p->thread_group.next))) *)&(p->thread_group.next)); }))), typeof(void)), "pointer type mismatch in container_of()"); ((struct task_struct *)(__mptr - __builtin_offsetof(struct task_struct, thread_group))); });

}
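/*
 * [Editor's note] The statement expression above is the expanded form of
 * list_entry_rcu(p->thread_group.next, struct task_struct, thread_group):
 * a READ_ONCE() of the ->next pointer plus container_of() with its
 * compile-time type checks.
 */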

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int thread_group_empty(struct task_struct *p)
{
return list_empty(&p->thread_group);
}




extern bool thread_group_exited(struct pid *pid);

extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
unsigned long *flags);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sighand_struct *lock_task_sighand(struct task_struct *task,
unsigned long *flags)
{
struct sighand_struct *ret;

ret = __lock_task_sighand(task, flags);
(void)(ret);
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void unlock_task_sighand(struct task_struct *task,
unsigned long *flags)
{
spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}


extern void lockdep_assert_task_sighand_held(struct task_struct *task);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long task_rlimit(const struct task_struct *task,
unsigned int limit)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_187(void) ; if (!((sizeof(task->signal->rlim[limit].rlim_cur) == sizeof(char) || sizeof(task->signal->rlim[limit].rlim_cur) == sizeof(short) || sizeof(task->signal->rlim[limit].rlim_cur) == sizeof(int) || sizeof(task->signal->rlim[limit].rlim_cur) == sizeof(long)) || sizeof(task->signal->rlim[limit].rlim_cur) == sizeof(long long))) __compiletime_assert_187(); } while (0); (*(const volatile typeof( _Generic((task->signal->rlim[limit].rlim_cur), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (task->signal->rlim[limit].rlim_cur))) *)&(task->signal->rlim[limit].rlim_cur)); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long task_rlimit_max(const struct task_struct *task,
unsigned int limit)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_188(void) ; if (!((sizeof(task->signal->rlim[limit].rlim_max) == sizeof(char) || sizeof(task->signal->rlim[limit].rlim_max) == sizeof(short) || sizeof(task->signal->rlim[limit].rlim_max) == sizeof(int) || sizeof(task->signal->rlim[limit].rlim_max) == sizeof(long)) || sizeof(task->signal->rlim[limit].rlim_max) == sizeof(long long))) __compiletime_assert_188(); } while (0); (*(const volatile typeof( _Generic((task->signal->rlim[limit].rlim_max), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (task->signal->rlim[limit].rlim_max))) *)&(task->signal->rlim[limit].rlim_max)); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long rlimit(unsigned int limit)
{
return task_rlimit(get_current(), limit);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long rlimit_max(unsigned int limit)
{
return task_rlimit_max(get_current(), limit);
}
# 7 "./include/linux/rcuwait.h" 2
# 16 "./include/linux/rcuwait.h"
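/*
 * [Editor's note] In the rcuwait helpers below, the RCU pointer accessors
 * expand into READ_ONCE()-style volatile loads, and rcu_assign_pointer()
 * in prepare_to_rcuwait() emits RISC-V's "fence rw,w" release barrier
 * before publishing w->task.
 */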
struct rcuwait {
struct task_struct *task;
};




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rcuwait_init(struct rcuwait *w)
{
w->task = ((void *)0);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int rcuwait_active(struct rcuwait *w)
{
return !!({ typeof(*(w->task)) *__UNIQUE_ID_rcu189 = (typeof(*(w->task)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_190(void) ; if (!((sizeof((w->task)) == sizeof(char) || sizeof((w->task)) == sizeof(short) || sizeof((w->task)) == sizeof(int) || sizeof((w->task)) == sizeof(long)) || sizeof((w->task)) == sizeof(long long))) __compiletime_assert_190(); } while (0); (*(const volatile typeof( _Generic(((w->task)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((w->task)))) *)&((w->task))); }); ; ((typeof(*(w->task)) *)(__UNIQUE_ID_rcu189)); });
}

extern int rcuwait_wake_up(struct rcuwait *w);







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void prepare_to_rcuwait(struct rcuwait *w)
{
do { uintptr_t _r_a_p__v = (uintptr_t)(get_current()); ; if (__builtin_constant_p(get_current()) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_191(void) ; if (!((sizeof((w->task)) == sizeof(char) || sizeof((w->task)) == sizeof(short) || sizeof((w->task)) == sizeof(int) || sizeof((w->task)) == sizeof(long)) || sizeof((w->task)) == sizeof(long long))) __compiletime_assert_191(); } while (0); do { *(volatile typeof((w->task)) *)&((w->task)) = ((typeof(w->task))(_r_a_p__v)); } while (0); } while (0); else do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_192(void) ; if (!((sizeof(*&w->task) == sizeof(char) || sizeof(*&w->task) == sizeof(short) || sizeof(*&w->task) == sizeof(int) || sizeof(*&w->task) == sizeof(long)))) __compiletime_assert_192(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_193(void) ; if (!((sizeof(*&w->task) == sizeof(char) || sizeof(*&w->task) == sizeof(short) || sizeof(*&w->task) == sizeof(int) || sizeof(*&w->task) == sizeof(long)) || sizeof(*&w->task) == sizeof(long long))) __compiletime_assert_193(); } while (0); do { *(volatile typeof(*&w->task) *)&(*&w->task) = ((typeof(*((typeof(w->task))_r_a_p__v)) *)((typeof(w->task))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); } while (0);
}

extern void finish_rcuwait(struct rcuwait *w);
# 8 "./include/linux/percpu-rwsem.h" 2

# 1 "./include/linux/rcu_sync.h" 1
# 17 "./include/linux/rcu_sync.h"
struct rcu_sync {
int gp_state;
int gp_count;
wait_queue_head_t gp_wait;

struct callback_head cb_head;
};
# 32 "./include/linux/rcu_sync.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool rcu_sync_is_idle(struct rcu_sync *rsp)
{
do { } while (0 && (!rcu_read_lock_any_held()));

return !({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_194(void) ; if (!((sizeof(rsp->gp_state) == sizeof(char) || sizeof(rsp->gp_state) == sizeof(short) || sizeof(rsp->gp_state) == sizeof(int) || sizeof(rsp->gp_state) == sizeof(long)) || sizeof(rsp->gp_state) == sizeof(long long))) __compiletime_assert_194(); } while (0); (*(const volatile typeof( _Generic((rsp->gp_state), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (rsp->gp_state))) *)&(rsp->gp_state)); });
}

extern void rcu_sync_init(struct rcu_sync *);
extern void rcu_sync_enter_start(struct rcu_sync *);
extern void rcu_sync_enter(struct rcu_sync *);
extern void rcu_sync_exit(struct rcu_sync *);
extern void rcu_sync_dtor(struct rcu_sync *);
# 10 "./include/linux/percpu-rwsem.h" 2


struct percpu_rw_semaphore {
struct rcu_sync rss;
unsigned int *read_count;
struct rcuwait writer;
wait_queue_head_t waiters;
atomic_t block;

struct lockdep_map dep_map;

};
# 45 "./include/linux/percpu-rwsem.h"
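/*
 * [Editor's note] percpu-rwsem reader fast paths. The big
 * switch (sizeof(*sem->read_count)) blocks below are the generic
 * this_cpu_inc()/this_cpu_dec() expansion (RISC-V has no arch-specific
 * per-cpu ops), and each __preempt_count_add(1)/__preempt_count_sub(1)
 * pair is preempt_disable()/preempt_enable().
 */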
extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void percpu_down_read(struct percpu_rw_semaphore *sem)
{
do { __might_sleep("include/linux/percpu-rwsem.h", 49); __cond_resched(); } while (0);

lock_acquire(&sem->dep_map, 0, 0, 1, 1, ((void *)0), (unsigned long)__builtin_return_address(0));

do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
# 62 "./include/linux/percpu-rwsem.h"
if (__builtin_expect(!!(rcu_sync_is_idle(&sem->rss)), 1))
do { do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(*sem->read_count)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count))); (typeof((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count))); (typeof((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count))); (typeof((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count))); (typeof((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
else
__percpu_down_read(sem, false);




do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
bool ret = true;

do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);



if (__builtin_expect(!!(rcu_sync_is_idle(&sem->rss)), 1))
do { do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(*sem->read_count)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count))); (typeof((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count))); (typeof((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count))); (typeof((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count))); (typeof((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
else
ret = __percpu_down_read(sem, true);
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);





if (ret)
lock_acquire(&sem->dep_map, 0, 1, 1, 1, ((void *)0), (unsigned long)__builtin_return_address(0));

return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void percpu_up_read(struct percpu_rw_semaphore *sem)
{
lock_release(&sem->dep_map, (unsigned long)__builtin_return_address(0));

do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);



if (__builtin_expect(!!(rcu_sync_is_idle(&sem->rss)), 1)) {
do { do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(*sem->read_count)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count))); (typeof((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(*sem->read_count))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count))); (typeof((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(*sem->read_count))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count))); (typeof((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(*sem->read_count))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count))); (typeof((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(*sem->read_count))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
} else {




do { do { } while (0); __asm__ __volatile__ ("fence " "rw" "," "rw" : : : "memory"); } while (0);





do { do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(*sem->read_count)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count))); (typeof((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(*sem->read_count))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count))); (typeof((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(*sem->read_count))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count))); (typeof((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(*sem->read_count))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count))); (typeof((typeof(*(&(*sem->read_count))) *)(&(*sem->read_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(*sem->read_count))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
rcuwait_wake_up(&sem->writer);
}
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
}

extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);

extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
const char *, struct lock_class_key *);

extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
# 141 "./include/linux/percpu-rwsem.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
bool read, unsigned long ip)
{
lock_release(&sem->dep_map, ip);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
bool read, unsigned long ip)
{
lock_acquire(&sem->dep_map, 0, 1, read, 1, ((void *)0), ip);
}
# 34 "./include/linux/fs.h" 2

# 1 "./include/linux/delayed_call.h" 1
# 10 "./include/linux/delayed_call.h"
struct delayed_call {
void (*fn)(void *);
void *arg;
};




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_delayed_call(struct delayed_call *call,
void (*fn)(void *), void *arg)
{
call->fn = fn;
call->arg = arg;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void do_delayed_call(struct delayed_call *call)
{
if (call->fn)
call->fn(call->arg);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_delayed_call(struct delayed_call *call)
{
call->fn = ((void *)0);
}
# 36 "./include/linux/fs.h" 2
# 1 "./include/linux/uuid.h" 1
# 11 "./include/linux/uuid.h"
# 1 "./include/uapi/linux/uuid.h" 1
# 15 "./include/uapi/linux/uuid.h"
typedef struct {
__u8 b[16];
} guid_t;
# 27 "./include/uapi/linux/uuid.h"
typedef guid_t uuid_le;
# 12 "./include/linux/uuid.h" 2




typedef struct {
__u8 b[16];
} uuid_t;
# 33 "./include/linux/uuid.h"
extern const guid_t guid_null;
extern const uuid_t uuid_null;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool guid_equal(const guid_t *u1, const guid_t *u2)
{
return memcmp(u1, u2, sizeof(guid_t)) == 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void guid_copy(guid_t *dst, const guid_t *src)
{
memcpy(dst, src, sizeof(guid_t));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void import_guid(guid_t *dst, const __u8 *src)
{
memcpy(dst, src, sizeof(guid_t));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void export_guid(__u8 *dst, const guid_t *src)
{
memcpy(dst, src, sizeof(guid_t));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool guid_is_null(const guid_t *guid)
{
return guid_equal(guid, &guid_null);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool uuid_equal(const uuid_t *u1, const uuid_t *u2)
{
return memcmp(u1, u2, sizeof(uuid_t)) == 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void uuid_copy(uuid_t *dst, const uuid_t *src)
{
memcpy(dst, src, sizeof(uuid_t));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void import_uuid(uuid_t *dst, const __u8 *src)
{
memcpy(dst, src, sizeof(uuid_t));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void export_uuid(__u8 *dst, const uuid_t *src)
{
memcpy(dst, src, sizeof(uuid_t));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool uuid_is_null(const uuid_t *uuid)
{
return uuid_equal(uuid, &uuid_null);
}

void generate_random_uuid(unsigned char uuid[16]);
void generate_random_guid(unsigned char guid[16]);

extern void guid_gen(guid_t *u);
extern void uuid_gen(uuid_t *u);

bool __attribute__((__warn_unused_result__)) uuid_is_valid(const char *uuid);

extern const u8 guid_index[16];
extern const u8 uuid_index[16];

int guid_parse(const char *uuid, guid_t *u);
int uuid_parse(const char *uuid, uuid_t *u);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int uuid_le_cmp(const guid_t u1, const guid_t u2)
{
return memcmp(&u1, &u2, sizeof(guid_t));
}
# 37 "./include/linux/fs.h" 2
# 1 "./include/linux/errseq.h" 1







typedef u32 errseq_t;

errseq_t errseq_set(errseq_t *eseq, int err);
errseq_t errseq_sample(errseq_t *eseq);
int errseq_check(errseq_t *eseq, errseq_t since);
int errseq_check_and_advance(errseq_t *eseq, errseq_t *since);
# 38 "./include/linux/fs.h" 2
# 1 "./include/linux/ioprio.h" 1





# 1 "./include/linux/sched/rt.h" 1






struct task_struct;
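/*
 * [Editor's note] The scheduler constants here are expanded: 100 is
 * MAX_RT_PRIO, and the policy values compared against are SCHED_FIFO (1),
 * SCHED_RR (2) and SCHED_DEADLINE (6); ioprio.h below also tests for
 * SCHED_IDLE (5).
 */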

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int rt_prio(int prio)
{
if (__builtin_expect(!!(prio < 100), 0))
return 1;
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int rt_task(struct task_struct *p)
{
return rt_prio(p->prio);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool task_is_realtime(struct task_struct *tsk)
{
int policy = tsk->policy;

if (policy == 1 || policy == 2)
return true;
if (policy == 6)
return true;
return false;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct task_struct *rt_mutex_get_top_task(struct task_struct *p)
{
return p->pi_top_task;
}
extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tsk_is_pi_blocked(struct task_struct *tsk)
{
return tsk->pi_blocked_on != ((void *)0);
}
# 58 "./include/linux/sched/rt.h"
extern void normalize_rt_tasks(void);
# 7 "./include/linux/ioprio.h" 2
# 1 "./include/linux/iocontext.h" 1








enum {
ICQ_EXITED = 1 << 2,
ICQ_DESTROYED = 1 << 3,
};
# 73 "./include/linux/iocontext.h"
struct io_cq {
struct request_queue *q;
struct io_context *ioc;







union {
struct list_head q_node;
struct kmem_cache *__rcu_icq_cache;
};
union {
struct hlist_node ioc_node;
struct callback_head __rcu_head;
};

unsigned int flags;
};





struct io_context {
atomic_long_t refcount;
atomic_t active_ref;

unsigned short ioprio;
# 115 "./include/linux/iocontext.h"
};

struct task_struct;

void put_io_context(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
int __copy_io(unsigned long clone_flags, struct task_struct *tsk);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
if (!get_current()->io_context)
return 0;
return __copy_io(clone_flags, tsk);
}
# 8 "./include/linux/ioprio.h" 2

# 1 "./include/uapi/linux/ioprio.h" 1
# 27 "./include/uapi/linux/ioprio.h"
enum {
IOPRIO_CLASS_NONE,
IOPRIO_CLASS_RT,
IOPRIO_CLASS_BE,
IOPRIO_CLASS_IDLE,
};







enum {
IOPRIO_WHO_PROCESS = 1,
IOPRIO_WHO_PGRP,
IOPRIO_WHO_USER,
};
# 10 "./include/linux/ioprio.h" 2
# 19 "./include/linux/ioprio.h"
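/*
 * [Editor's note] (((ioprio) >> 13) & 0x07) is IOPRIO_PRIO_CLASS() with
 * IOPRIO_CLASS_SHIFT == 13; the fallback in get_current_ioprio() is
 * IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4), the default best-effort priority.
 */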
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ioprio_valid(unsigned short ioprio)
{
unsigned short class = (((ioprio) >> 13) & 0x07);

return class > IOPRIO_CLASS_NONE && class <= IOPRIO_CLASS_IDLE;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int task_nice_ioprio(struct task_struct *task)
{
return (task_nice(task) + 20) / 5;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int task_nice_ioclass(struct task_struct *task)
{
if (task->policy == 5)
return IOPRIO_CLASS_IDLE;
else if (task_is_realtime(task))
return IOPRIO_CLASS_RT;
else
return IOPRIO_CLASS_BE;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int get_current_ioprio(void)
{
struct io_context *ioc = get_current()->io_context;

if (ioc)
return ioc->ioprio;
return ((((IOPRIO_CLASS_BE) & 0x07) << 13) | ((4) & ((1UL << 13) - 1)));
}




extern int ioprio_best(unsigned short aprio, unsigned short bprio);

extern int set_task_ioprio(struct task_struct *task, int ioprio);


extern int ioprio_check_cap(int ioprio);
# 39 "./include/linux/fs.h" 2
# 1 "./include/linux/fs_types.h" 1
# 71 "./include/linux/fs_types.h"
extern unsigned char fs_ftype_to_dtype(unsigned int filetype);
extern unsigned char fs_umode_to_ftype(umode_t mode);
extern unsigned char fs_umode_to_dtype(umode_t mode);
# 40 "./include/linux/fs.h" 2


# 1 "./include/linux/mount.h" 1
# 20 "./include/linux/mount.h"
struct super_block;
struct vfsmount;
struct dentry;
struct mnt_namespace;
struct fs_context;
# 71 "./include/linux/mount.h"
struct vfsmount {
struct dentry *mnt_root;
struct super_block *mnt_sb;
int mnt_flags;
struct user_namespace *mnt_userns;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct user_namespace *mnt_user_ns(const struct vfsmount *mnt)
{

return ({ typeof(*&mnt->mnt_userns) ___p1 = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_195(void) ; if (!((sizeof(*&mnt->mnt_userns) == sizeof(char) || sizeof(*&mnt->mnt_userns) == sizeof(short) || sizeof(*&mnt->mnt_userns) == sizeof(int) || sizeof(*&mnt->mnt_userns) == sizeof(long)) || sizeof(*&mnt->mnt_userns) == sizeof(long long))) __compiletime_assert_195(); } while (0); (*(const volatile typeof( _Generic((*&mnt->mnt_userns), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*&mnt->mnt_userns))) *)&(*&mnt->mnt_userns)); }); do { __attribute__((__noreturn__)) extern void __compiletime_assert_196(void) ; if (!((sizeof(*&mnt->mnt_userns) == sizeof(char) || sizeof(*&mnt->mnt_userns) == sizeof(short) || sizeof(*&mnt->mnt_userns) == sizeof(int) || sizeof(*&mnt->mnt_userns) == sizeof(long)))) __compiletime_assert_196(); } while (0); __asm__ __volatile__ ("fence " "r" "," "rw" : : : "memory"); ___p1; });
}
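/*
 * [Editor's note] The statement expression in mnt_user_ns() above is
 * smp_load_acquire(&mnt->mnt_userns): a volatile load followed by
 * RISC-V's "fence r,rw" acquire barrier.
 */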

struct file;
struct path;

extern int mnt_want_write(struct vfsmount *mnt);
extern int mnt_want_write_file(struct file *file);
extern void mnt_drop_write(struct vfsmount *mnt);
extern void mnt_drop_write_file(struct file *file);
extern void mntput(struct vfsmount *mnt);
extern struct vfsmount *mntget(struct vfsmount *mnt);
extern struct vfsmount *mnt_clone_internal(const struct path *path);
extern bool __mnt_is_readonly(struct vfsmount *mnt);
extern bool mnt_may_suid(struct vfsmount *mnt);

struct path;
extern struct vfsmount *clone_private_mount(const struct path *path);
extern int __mnt_want_write(struct vfsmount *);
extern void __mnt_drop_write(struct vfsmount *);

struct file_system_type;
extern struct vfsmount *fc_mount(struct fs_context *fc);
extern struct vfsmount *vfs_create_mount(struct fs_context *fc);
extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
int flags, const char *name,
void *data);
extern struct vfsmount *vfs_submount(const struct dentry *mountpoint,
struct file_system_type *type,
const char *name, void *data);

extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list);
extern void mark_mounts_for_expiry(struct list_head *mounts);

extern dev_t name_to_dev_t(const char *name);
extern bool path_is_mountpoint(const struct path *path);

extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num);
# 43 "./include/linux/fs.h" 2

# 1 "./include/linux/mnt_idmapping.h" 1







struct user_namespace;





extern struct user_namespace init_user_ns;
# 25 "./include/linux/mnt_idmapping.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool initial_idmapping(const struct user_namespace *ns)
{
return ns == &init_user_ns;
}
# 44 "./include/linux/mnt_idmapping.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool no_idmapping(const struct user_namespace *mnt_userns,
const struct user_namespace *fs_userns)
{
return initial_idmapping(mnt_userns) || mnt_userns == fs_userns;
}
# 70 "./include/linux/mnt_idmapping.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) kuid_t mapped_kuid_fs(struct user_namespace *mnt_userns,
struct user_namespace *fs_userns,
kuid_t kuid)
{
uid_t uid;

if (no_idmapping(mnt_userns, fs_userns))
return kuid;
if (initial_idmapping(fs_userns))
uid = __kuid_val(kuid);
else
uid = from_kuid(fs_userns, kuid);
if (uid == (uid_t)-1)
return (kuid_t){ -1 };
return make_kuid(mnt_userns, uid);
}
# 107 "./include/linux/mnt_idmapping.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) kgid_t mapped_kgid_fs(struct user_namespace *mnt_userns,
struct user_namespace *fs_userns,
kgid_t kgid)
{
gid_t gid;

if (no_idmapping(mnt_userns, fs_userns))
return kgid;
if (initial_idmapping(fs_userns))
gid = __kgid_val(kgid);
else
gid = from_kgid(fs_userns, kgid);
if (gid == (gid_t)-1)
return (kgid_t){ -1 };
return make_kgid(mnt_userns, gid);
}
# 144 "./include/linux/mnt_idmapping.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) kuid_t mapped_kuid_user(struct user_namespace *mnt_userns,
struct user_namespace *fs_userns,
kuid_t kuid)
{
uid_t uid;

if (no_idmapping(mnt_userns, fs_userns))
return kuid;
uid = from_kuid(mnt_userns, kuid);
if (uid == (uid_t)-1)
return (kuid_t){ -1 };
if (initial_idmapping(fs_userns))
return (kuid_t){ uid };
return make_kuid(fs_userns, uid);
}
# 180 "./include/linux/mnt_idmapping.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) kgid_t mapped_kgid_user(struct user_namespace *mnt_userns,
struct user_namespace *fs_userns,
kgid_t kgid)
{
gid_t gid;

if (no_idmapping(mnt_userns, fs_userns))
return kgid;
gid = from_kgid(mnt_userns, kgid);
if (gid == (gid_t)-1)
return (kgid_t){ -1 };
if (initial_idmapping(fs_userns))
return (kgid_t){ gid };
return make_kgid(fs_userns, gid);
}
# 209 "./include/linux/mnt_idmapping.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) kuid_t mapped_fsuid(struct user_namespace *mnt_userns,
struct user_namespace *fs_userns)
{
return mapped_kuid_user(mnt_userns, fs_userns, (({ ({ do { } while (0 && (!((1)))); ; ((typeof(*(get_current()->cred)) *)((get_current()->cred))); })->fsuid; })));
}
# 228 "./include/linux/mnt_idmapping.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) kgid_t mapped_fsgid(struct user_namespace *mnt_userns,
struct user_namespace *fs_userns)
{
return mapped_kgid_user(mnt_userns, fs_userns, (({ ({ do { } while (0 && (!((1)))); ; ((typeof(*(get_current()->cred)) *)((get_current()->cred))); })->fsgid; })));
}
# 45 "./include/linux/fs.h" 2
# 1 "./include/linux/slab.h" 1
# 16 "./include/linux/slab.h"
# 1 "./include/linux/overflow.h" 1
# 50 "./include/linux/overflow.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __attribute__((__warn_unused_result__)) __must_check_overflow(bool overflow)
{
return __builtin_expect(!!(overflow), 0);
}
# 131 "./include/linux/overflow.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) size_t __attribute__((__warn_unused_result__)) size_mul(size_t factor1, size_t factor2)
{
size_t bytes;

if (__must_check_overflow(({ typeof(factor1) __a = (factor1); typeof(factor2) __b = (factor2); typeof(&bytes) __d = (&bytes); (void) (&__a == &__b); (void) (&__a == __d); __builtin_mul_overflow(__a, __b, __d); })))
return (~(size_t)0);

return bytes;
}
# 151 "./include/linux/overflow.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) size_t __attribute__((__warn_unused_result__)) size_add(size_t addend1, size_t addend2)
{
size_t bytes;

if (__must_check_overflow(({ typeof(addend1) __a = (addend1); typeof(addend2) __b = (addend2); typeof(&bytes) __d = (&bytes); (void) (&__a == &__b); (void) (&__a == __d); __builtin_add_overflow(__a, __b, __d); })))
return (~(size_t)0);

return bytes;
}
# 173 "./include/linux/overflow.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) size_t __attribute__((__warn_unused_result__)) size_sub(size_t minuend, size_t subtrahend)
{
size_t bytes;

if (minuend == (~(size_t)0) || subtrahend == (~(size_t)0) ||
__must_check_overflow(({ typeof(minuend) __a = (minuend); typeof(subtrahend) __b = (subtrahend); typeof(&bytes) __d = (&bytes); (void) (&__a == &__b); (void) (&__a == __d); __builtin_sub_overflow(__a, __b, __d); })))
return (~(size_t)0);

return bytes;
}
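/*
 * [Editor's note] size_mul()/size_add()/size_sub() above wrap the
 * compiler's __builtin_*_overflow() builtins via check_*_overflow() and
 * saturate to SIZE_MAX (~(size_t)0) on overflow.
 */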
# 17 "./include/linux/slab.h" 2


# 1 "./include/linux/percpu-refcount.h" 1
# 59 "./include/linux/percpu-refcount.h"
struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);


enum {
__PERCPU_REF_ATOMIC = 1LU << 0,
__PERCPU_REF_DEAD = 1LU << 1,
__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

__PERCPU_REF_FLAG_BITS = 2,
};


enum {







PERCPU_REF_INIT_ATOMIC = 1 << 0,






PERCPU_REF_INIT_DEAD = 1 << 1,




PERCPU_REF_ALLOW_REINIT = 1 << 2,
};

struct percpu_ref_data {
atomic_long_t count;
percpu_ref_func_t *release;
percpu_ref_func_t *confirm_switch;
bool force_atomic:1;
bool allow_reinit:1;
struct callback_head rcu;
struct percpu_ref *ref;
};

struct percpu_ref {




unsigned long percpu_count_ptr;







struct percpu_ref_data *data;
};
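/*
 * percpu_ref packs its state into percpu_count_ptr: the low
 * __PERCPU_REF_FLAG_BITS (2) bits carry the ATOMIC/DEAD flags and the
 * rest is the address of the per-cpu unsigned long counter, whose
 * alignment guarantees those bits are otherwise zero.  The cold
 * bookkeeping lives in the separately allocated percpu_ref_data.
 */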

int __attribute__((__warn_unused_result__)) percpu_ref_init(struct percpu_ref *ref,
percpu_ref_func_t *release, unsigned int flags,
gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill);
void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);
bool percpu_ref_is_zero(struct percpu_ref *ref);
# 147 "./include/linux/percpu-refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void percpu_ref_kill(struct percpu_ref *ref)
{
percpu_ref_kill_and_confirm(ref, ((void *)0));
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __ref_is_percpu(struct percpu_ref *ref,
unsigned long **percpu_countp)
{
unsigned long percpu_ptr;
# 174 "./include/linux/percpu-refcount.h"
percpu_ptr = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_197(void) ; if (!((sizeof(ref->percpu_count_ptr) == sizeof(char) || sizeof(ref->percpu_count_ptr) == sizeof(short) || sizeof(ref->percpu_count_ptr) == sizeof(int) || sizeof(ref->percpu_count_ptr) == sizeof(long)) || sizeof(ref->percpu_count_ptr) == sizeof(long long))) __compiletime_assert_197(); } while (0); (*(const volatile typeof( _Generic((ref->percpu_count_ptr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (ref->percpu_count_ptr))) *)&(ref->percpu_count_ptr)); });







if (__builtin_expect(!!(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD), 0))
return false;

*percpu_countp = (unsigned long *)percpu_ptr;
return true;
}
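/*
 * The statement expression above is the expansion of
 * READ_ONCE(ref->percpu_count_ptr): a compile-time size assertion plus a
 * volatile load.  If the ATOMIC or DEAD flag is set the ref is in (or
 * switching to) atomic mode and false is returned; otherwise the value
 * is the live per-cpu counter pointer.
 */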
# 198 "./include/linux/percpu-refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
unsigned long *percpu_count;

rcu_read_lock();

if (__ref_is_percpu(ref, &percpu_count))
do { do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(*percpu_count)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*percpu_count))) *)(&(*percpu_count))); (typeof((typeof(*(&(*percpu_count))) *)(&(*percpu_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += nr; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*percpu_count))) *)(&(*percpu_count))); (typeof((typeof(*(&(*percpu_count))) *)(&(*percpu_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += nr; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*percpu_count))) *)(&(*percpu_count))); (typeof((typeof(*(&(*percpu_count))) *)(&(*percpu_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += nr; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*percpu_count))) *)(&(*percpu_count))); (typeof((typeof(*(&(*percpu_count))) *)(&(*percpu_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += nr; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
else
atomic_long_add(nr, &ref->data->count);

rcu_read_unlock();
}
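/*
 * The single long statement in the percpu branch above is the expansion
 * of this_cpu_add(*percpu_count, nr): a switch on sizeof(*percpu_count)
 * in which each case saves the interrupt state, adds to this CPU's slot
 * via __per_cpu_offset[cpu], and restores interrupts.
 */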
# 220 "./include/linux/percpu-refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void percpu_ref_get(struct percpu_ref *ref)
{
percpu_ref_get_many(ref, 1);
}
# 235 "./include/linux/percpu-refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool percpu_ref_tryget_many(struct percpu_ref *ref,
unsigned long nr)
{
unsigned long *percpu_count;
bool ret;

rcu_read_lock();

if (__ref_is_percpu(ref, &percpu_count)) {
do { do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(*percpu_count)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*percpu_count))) *)(&(*percpu_count))); (typeof((typeof(*(&(*percpu_count))) *)(&(*percpu_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += nr; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*percpu_count))) *)(&(*percpu_count))); (typeof((typeof(*(&(*percpu_count))) *)(&(*percpu_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += nr; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*percpu_count))) *)(&(*percpu_count))); (typeof((typeof(*(&(*percpu_count))) *)(&(*percpu_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += nr; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*percpu_count))) *)(&(*percpu_count))); (typeof((typeof(*(&(*percpu_count))) *)(&(*percpu_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += nr; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
ret = true;
} else {
ret = atomic_long_add_unless(&ref->data->count, nr, 0);
}

rcu_read_unlock();

return ret;
}
# 264 "./include/linux/percpu-refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool percpu_ref_tryget(struct percpu_ref *ref)
{
return percpu_ref_tryget_many(ref, 1);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool percpu_ref_tryget_live_rcu(struct percpu_ref *ref)
{
unsigned long *percpu_count;
bool ret = false;

({ int __ret_warn_on = !!(!rcu_read_lock_held()); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/percpu-refcount.h"), "i" (280), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });

if (__builtin_expect(!!(__ref_is_percpu(ref, &percpu_count)), 1)) {
do { do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(*percpu_count)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*percpu_count))) *)(&(*percpu_count))); (typeof((typeof(*(&(*percpu_count))) *)(&(*percpu_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*percpu_count))) *)(&(*percpu_count))); (typeof((typeof(*(&(*percpu_count))) *)(&(*percpu_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*percpu_count))) *)(&(*percpu_count))); (typeof((typeof(*(&(*percpu_count))) *)(&(*percpu_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*percpu_count))) *)(&(*percpu_count))); (typeof((typeof(*(&(*percpu_count))) *)(&(*percpu_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
ret = true;
} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
ret = atomic_long_inc_not_zero(&ref->data->count);
}
return ret;
}
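/*
 * tryget_live differs from plain tryget in the slow path: once
 * __PERCPU_REF_DEAD is set, atomic_long_inc_not_zero() is skipped, so
 * new references are refused even while existing ones keep the count
 * non-zero.  The WARN_ON expansion at the top asserts that the caller
 * already holds rcu_read_lock().
 */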
# 306 "./include/linux/percpu-refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
bool ret = false;

rcu_read_lock();
ret = percpu_ref_tryget_live_rcu(ref);
rcu_read_unlock();
return ret;
}
# 326 "./include/linux/percpu-refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
unsigned long *percpu_count;

rcu_read_lock();

if (__ref_is_percpu(ref, &percpu_count))
do { do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(*percpu_count)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*percpu_count))) *)(&(*percpu_count))); (typeof((typeof(*(&(*percpu_count))) *)(&(*percpu_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(*percpu_count))(nr); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*percpu_count))) *)(&(*percpu_count))); (typeof((typeof(*(&(*percpu_count))) *)(&(*percpu_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(*percpu_count))(nr); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*percpu_count))) *)(&(*percpu_count))); (typeof((typeof(*(&(*percpu_count))) *)(&(*percpu_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(*percpu_count))(nr); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*percpu_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*percpu_count))) *)(&(*percpu_count))); (typeof((typeof(*(&(*percpu_count))) *)(&(*percpu_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(*percpu_count))(nr); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
else if (__builtin_expect(!!(atomic_long_sub_and_test(nr, &ref->data->count)), 0))
ref->data->release(ref);

rcu_read_unlock();
}
# 349 "./include/linux/percpu-refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void percpu_ref_put(struct percpu_ref *ref)
{
percpu_ref_put_many(ref, 1);
}
# 363 "./include/linux/percpu-refcount.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool percpu_ref_is_dying(struct percpu_ref *ref)
{
return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}
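/*
 * percpu_ref_is_dying() is true from percpu_ref_kill() until a later
 * percpu_ref_resurrect()/percpu_ref_reinit(); it says nothing about
 * whether the count has reached zero yet.
 */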
# 20 "./include/linux/slab.h" 2
# 133 "./include/linux/slab.h"
# 1 "./include/linux/kasan.h" 1





# 1 "./include/linux/kasan-enabled.h" 1




# 1 "./include/linux/static_key.h" 1
# 6 "./include/linux/kasan-enabled.h" 2
# 23 "./include/linux/kasan-enabled.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool kasan_enabled(void)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool kasan_hw_tags_enabled(void)
{
return false;
}
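/*
 * kasan_enabled() folding to a constant 0 and the run of empty kasan_*
 * stubs below indicate CONFIG_KASAN is off in this configuration, so
 * the compiler eliminates every call site.
 */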
# 7 "./include/linux/kasan.h" 2

# 1 "./include/linux/static_key.h" 1
# 9 "./include/linux/kasan.h" 2


struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;
# 24 "./include/linux/kasan.h"
typedef unsigned int kasan_vmalloc_flags_t;
# 74 "./include/linux/kasan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int kasan_add_zero_shadow(void *start, unsigned long size)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_remove_zero_shadow(void *start,
unsigned long size)
{}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_enable_current(void) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_disable_current(void) {}
# 93 "./include/linux/kasan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool kasan_has_integrated_init(void)
{
return kasan_hw_tags_enabled();
}
# 272 "./include/linux/kasan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) slab_flags_t kasan_never_merge(void)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_unpoison_range(const void *address, size_t size) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_poison_pages(struct page *page, unsigned int order,
bool init) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_unpoison_pages(struct page *page, unsigned int order,
bool init) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_cache_create(struct kmem_cache *cache,
unsigned int *size,
slab_flags_t *flags) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_poison_slab(struct slab *slab) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_unpoison_object_data(struct kmem_cache *cache,
void *object) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_poison_object_data(struct kmem_cache *cache,
void *object) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kasan_init_slab_obj(struct kmem_cache *cache,
const void *object)
{
return (void *)object;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
return false;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_kfree_large(void *ptr) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_slab_free_mempool(void *ptr) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags, bool init)
{
return object;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kasan_kmalloc(struct kmem_cache *s, const void *object,
size_t size, gfp_t flags)
{
return (void *)object;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
return (void *)ptr;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kasan_krealloc(const void *object, size_t new_size,
gfp_t flags)
{
return (void *)object;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool kasan_check_byte(const void *address)
{
return true;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_unpoison_task_stack(struct task_struct *task) {}
# 343 "./include/linux/kasan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_record_aux_stack(void *ptr) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_record_aux_stack_noalloc(void *ptr) {}
# 369 "./include/linux/kasan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kasan_reset_tag(const void *addr)
{
return (void *)addr;
}
# 385 "./include/linux/kasan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_init_sw_tags(void) { }






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_init_hw_tags_cpu(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_init_hw_tags(void) { }
# 444 "./include/linux/kasan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_populate_early_vm_area_shadow(void *start,
unsigned long size) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int kasan_populate_vmalloc(unsigned long start,
unsigned long size)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_release_vmalloc(unsigned long start,
unsigned long end,
unsigned long free_region_start,
unsigned long free_region_end) { }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kasan_unpoison_vmalloc(const void *start,
unsigned long size,
kasan_vmalloc_flags_t flags)
{
return (void *)start;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
# 480 "./include/linux/kasan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_free_module_shadow(const struct vm_struct *vm) {}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kasan_non_canonical_hook(unsigned long addr) { }
# 134 "./include/linux/slab.h" 2

struct list_lru;
struct mem_cgroup;



void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) kmem_cache_init(void);
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
unsigned int align, slab_flags_t flags,
void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
unsigned int size, unsigned int align,
slab_flags_t flags,
unsigned int useroffset, unsigned int usersize,
void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);
# 180 "./include/linux/slab.h"
void * __attribute__((__warn_unused_result__)) krealloc(const void *objp, size_t new_size, gfp_t flags) __attribute__((__alloc_size__(2))) __attribute__((__malloc__));
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
size_t ksize(const void *objp);

bool kmem_valid_obj(void *object);
void kmem_dump_obj(void *object);
# 301 "./include/linux/slab.h"
enum kmalloc_cache_type {
KMALLOC_NORMAL = 0,

KMALLOC_DMA = KMALLOC_NORMAL,


KMALLOC_CGROUP = KMALLOC_NORMAL,



KMALLOC_RECLAIM,



NR_KMALLOC_TYPES
};


extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][((12) + 1) + 1];
# 330 "./include/linux/slab.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{




if (__builtin_expect(!!((flags & ((( gfp_t)0x10u) | (0 ? (( gfp_t)0x01u) : 0) | (0 ? (( gfp_t)0x400000u) : 0))) == 0), 1))
return KMALLOC_NORMAL;
# 346 "./include/linux/slab.h"
if (0 && (flags & (( gfp_t)0x01u)))
return KMALLOC_DMA;
if (!0 || (flags & (( gfp_t)0x10u)))
return KMALLOC_RECLAIM;
else
return KMALLOC_CGROUP;
}
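/*
 * kmalloc_type() selects the kmalloc_caches[] row.  The constant-folded
 * 0/!0 tests suggest CONFIG_ZONE_DMA and memcg kmem accounting are
 * disabled in this build, so every request resolves to KMALLOC_NORMAL
 * except those carrying __GFP_RECLAIMABLE (the 0x10u literal), which
 * pick KMALLOC_RECLAIM.
 */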
# 367 "./include/linux/slab.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned int __kmalloc_index(size_t size,
bool size_is_constant)
{
if (!size)
return 0;

if (size <= (1 << 3))
return 3;

if ((1 << 3) <= 32 && size > 64 && size <= 96)
return 1;
if ((1 << 3) <= 64 && size > 128 && size <= 192)
return 2;
if (size <= 8) return 3;
if (size <= 16) return 4;
if (size <= 32) return 5;
if (size <= 64) return 6;
if (size <= 128) return 7;
if (size <= 256) return 8;
if (size <= 512) return 9;
if (size <= 1024) return 10;
if (size <= 2 * 1024) return 11;
if (size <= 4 * 1024) return 12;
if (size <= 8 * 1024) return 13;
if (size <= 16 * 1024) return 14;
if (size <= 32 * 1024) return 15;
if (size <= 64 * 1024) return 16;
if (size <= 128 * 1024) return 17;
if (size <= 256 * 1024) return 18;
if (size <= 512 * 1024) return 19;
if (size <= 1024 * 1024) return 20;
if (size <= 2 * 1024 * 1024) return 21;
if (size <= 4 * 1024 * 1024) return 22;
if (size <= 8 * 1024 * 1024) return 23;
if (size <= 16 * 1024 * 1024) return 24;
if (size <= 32 * 1024 * 1024) return 25;

if (!0 && size_is_constant)
do { __attribute__((__noreturn__)) extern void __compiletime_assert_198(void) ; if (!(!(1))) __compiletime_assert_198(); } while (0);
else
do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/slab.h"), "i" (407), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0);


return -1;
}
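/*
 * __kmalloc_index() maps a size to a kmalloc_caches[] column: index n
 * covers sizes up to 2^n bytes (index 3 = 8 bytes upwards), with
 * indexes 1 and 2 reserved for the off-ladder 96- and 192-byte caches.
 * A non-constant size reaching here trips the BUG() expansion at
 * runtime; a constant one breaks the build via the expanded
 * compiletime_assert.
 */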



void *__kmalloc(size_t size, gfp_t flags) __attribute__((__assume_aligned__(__alignof__(unsigned long long)))) __attribute__((__alloc_size__(1))) __attribute__((__malloc__));
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __attribute__((__assume_aligned__(__alignof__(unsigned long long)))) __attribute__((__malloc__));
void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
gfp_t gfpflags) __attribute__((__assume_aligned__(__alignof__(unsigned long long)))) __attribute__((__malloc__));
void kmem_cache_free(struct kmem_cache *s, void *objp);
# 428 "./include/linux/slab.h"
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void kfree_bulk(size_t size, void **p)
{
kmem_cache_free_bulk(((void *)0), size, p);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((__alloc_size__(1))) __attribute__((__malloc__)) void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
return __kmalloc(size, flags);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
return kmem_cache_alloc(s, flags);
}



extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
__attribute__((__assume_aligned__(__alignof__(unsigned long long)))) __attribute__((__alloc_size__(3))) __attribute__((__malloc__));






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((__alloc_size__(4))) __attribute__((__malloc__)) void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags, int node, size_t size)
{
return kmem_cache_alloc_trace(s, gfpflags, size);
}
# 493 "./include/linux/slab.h"
extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __attribute__((__assume_aligned__(((1UL) << (12)))))
__attribute__((__alloc_size__(1))) __attribute__((__malloc__));


extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
__attribute__((__assume_aligned__(((1UL) << (12))))) __attribute__((__alloc_size__(1))) __attribute__((__malloc__));
# 507 "./include/linux/slab.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((__alloc_size__(1))) __attribute__((__malloc__)) void *kmalloc_large(size_t size, gfp_t flags)
{
unsigned int order = get_order(size);
return kmalloc_order_trace(size, flags, order);
}
# 567 "./include/linux/slab.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((__alloc_size__(1))) __attribute__((__malloc__)) void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {

unsigned int index;

if (size > (1UL << ((12) + 1)))
return kmalloc_large(size, flags);

index = __kmalloc_index(size, true);

if (!index)
return ((void *)16);

return kmem_cache_alloc_trace(
kmalloc_caches[kmalloc_type(flags)][index],
flags, size);

}
return __kmalloc(size, flags);
}
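/*
 * kmalloc() fast path: for a compile-time-constant size the lookup folds
 * away entirely -- requests above 1UL << 13 (8 KiB here) go to
 * kmalloc_large(), size 0 yields ZERO_SIZE_PTR ((void *)16), and the
 * rest become a direct kmem_cache_alloc_trace() on the precomputed
 * cache.  Non-constant sizes fall through to __kmalloc().
 */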

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((__alloc_size__(1))) __attribute__((__malloc__)) void *kmalloc_node(size_t size, gfp_t flags, int node)
{

if (__builtin_constant_p(size) &&
size <= (1UL << ((12) + 1))) {
unsigned int i = __kmalloc_index(size, true);

if (!i)
return ((void *)16);

return kmem_cache_alloc_node_trace(
kmalloc_caches[kmalloc_type(flags)][i],
flags, node, size);
}

return __kmalloc_node(size, flags, node);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__alloc_size__(1, 2))) __attribute__((__malloc__)) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
size_t bytes;

if (__builtin_expect(!!(__must_check_overflow(({ typeof(n) __a = (n); typeof(size) __b = (size); typeof(&bytes) __d = (&bytes); (void) (&__a == &__b); (void) (&__a == __d); __builtin_mul_overflow(__a, __b, __d); }))), 0))
return ((void *)0);
if (__builtin_constant_p(n) && __builtin_constant_p(size))
return kmalloc(bytes, flags);
return __kmalloc(bytes, flags);
}
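/*
 * kmalloc_array() is the overflow-checked form of kmalloc(n * size):
 * the expanded check_mul_overflow() makes it return NULL on overflow
 * before anything is allocated.  A usage sketch (struct name
 * hypothetical):
 *
 *	struct foo *tbl = kmalloc_array(nr, sizeof(*tbl), GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 */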
# 631 "./include/linux/slab.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__alloc_size__(2, 3))) __attribute__((__malloc__)) void * __attribute__((__warn_unused_result__)) krealloc_array(void *p,
size_t new_n,
size_t new_size,
gfp_t flags)
{
size_t bytes;

if (__builtin_expect(!!(__must_check_overflow(({ typeof(new_n) __a = (new_n); typeof(new_size) __b = (new_size); typeof(&bytes) __d = (&bytes); (void) (&__a == &__b); (void) (&__a == __d); __builtin_mul_overflow(__a, __b, __d); }))), 0))
return ((void *)0);

return krealloc(p, bytes, flags);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__alloc_size__(1, 2))) __attribute__((__malloc__)) void *kcalloc(size_t n, size_t size, gfp_t flags)
{
return kmalloc_array(n, size, flags | (( gfp_t)0x100u));
}
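/*
 * kcalloc(n, size, flags) is kmalloc_array() with __GFP_ZERO (the
 * (gfp_t)0x100u literal above) OR-ed in: an overflow-checked, zeroed
 * array allocation.
 */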
# 663 "./include/linux/slab.h"
extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__alloc_size__(1, 2))) __attribute__((__malloc__)) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
int node)
{
size_t bytes;

if (__builtin_expect(!!(__must_check_overflow(({ typeof(n) __a = (n); typeof(size) __b = (size); typeof(&bytes) __d = (&bytes); (void) (&__a == &__b); (void) (&__a == __d); __builtin_mul_overflow(__a, __b, __d); }))), 0))
return ((void *)0);
if (__builtin_constant_p(n) && __builtin_constant_p(size))
return kmalloc_node(bytes, flags, node);
return __kmalloc_node(bytes, flags, node);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__alloc_size__(1, 2))) __attribute__((__malloc__)) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
return kmalloc_array_node(n, size, flags | (( gfp_t)0x100u), node);
}
# 702 "./include/linux/slab.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
return kmem_cache_alloc(k, flags | (( gfp_t)0x100u));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__alloc_size__(1))) __attribute__((__malloc__)) void *kzalloc(size_t size, gfp_t flags)
{
return kmalloc(size, flags | (( gfp_t)0x100u));
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__alloc_size__(1))) __attribute__((__malloc__)) void *kzalloc_node(size_t size, gfp_t flags, int node)
{
return kmalloc_node(size, flags | (( gfp_t)0x100u), node);
}

extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __attribute__((__alloc_size__(1))) __attribute__((__malloc__));
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__alloc_size__(1))) __attribute__((__malloc__)) void *kvmalloc(size_t size, gfp_t flags)
{
return kvmalloc_node(size, flags, (-1));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__alloc_size__(1))) __attribute__((__malloc__)) void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
return kvmalloc_node(size, flags | (( gfp_t)0x100u), node);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__alloc_size__(1))) __attribute__((__malloc__)) void *kvzalloc(size_t size, gfp_t flags)
{
return kvmalloc(size, flags | (( gfp_t)0x100u));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__alloc_size__(1, 2))) __attribute__((__malloc__)) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
size_t bytes;

if (__builtin_expect(!!(__must_check_overflow(({ typeof(n) __a = (n); typeof(size) __b = (size); typeof(&bytes) __d = (&bytes); (void) (&__a == &__b); (void) (&__a == __d); __builtin_mul_overflow(__a, __b, __d); }))), 0))
return ((void *)0);

return kvmalloc(bytes, flags);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__alloc_size__(1, 2))) __attribute__((__malloc__)) void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
return kvmalloc_array(n, size, flags | (( gfp_t)0x100u));
}
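/*
 * The kv* helpers wrap kvmalloc_node(), which per its kernel-doc tries
 * kmalloc() first and falls back to vmalloc() when the slab cannot
 * satisfy the request; kvfree() releases either kind.  The wrappers
 * here only add __GFP_ZERO, a NUMA node, or the overflow check above.
 */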

extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
__attribute__((__alloc_size__(3))) __attribute__((__malloc__));
extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);

unsigned int kmem_cache_size(struct kmem_cache *s);
void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) kmem_cache_init_late(void);
# 46 "./include/linux/fs.h" 2


# 1 "./include/uapi/linux/fs.h" 1
# 54 "./include/uapi/linux/fs.h"
struct file_clone_range {
__s64 src_fd;
__u64 src_offset;
__u64 src_length;
__u64 dest_offset;
};

struct fstrim_range {
__u64 start;
__u64 len;
__u64 minlen;
};






struct file_dedupe_range_info {
__s64 dest_fd;
__u64 dest_offset;
__u64 bytes_deduped;






__s32 status;
__u32 reserved;
};


struct file_dedupe_range {
__u64 src_offset;
__u64 src_length;
__u16 dest_count;
__u16 reserved1;
__u32 reserved2;
struct file_dedupe_range_info info[0];
};


struct files_stat_struct {
unsigned long nr_files;
unsigned long nr_free_files;
unsigned long max_files;
};

struct inodes_stat_t {
long nr_inodes;
long nr_unused;
long dummy[5];
};







struct fsxattr {
__u32 fsx_xflags;
__u32 fsx_extsize;
__u32 fsx_nextents;
__u32 fsx_projid;
__u32 fsx_cowextsize;
unsigned char fsx_pad[8];
};
# 287 "./include/uapi/linux/fs.h"
typedef int __kernel_rwf_t;
# 49 "./include/linux/fs.h" 2

struct backing_dev_info;
struct bdi_writeback;
struct bio;
struct io_comp_batch;
struct export_operations;
struct fiemap_extent_info;
struct hd_geometry;
struct iovec;
struct kiocb;
struct kobject;
struct pipe_inode_info;
struct poll_table_struct;
struct kstatfs;
struct vm_area_struct;
struct vfsmount;
struct cred;
struct swap_info_struct;
struct seq_file;
struct workqueue_struct;
struct iov_iter;
struct fscrypt_info;
struct fscrypt_operations;
struct fsverity_info;
struct fsverity_operations;
struct fs_context;
struct fs_parameter_spec;
struct fileattr;

extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) inode_init(void);
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) inode_init_early(void);
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) files_init(void);
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) files_maxfiles_init(void);

extern unsigned long get_max_files(void);
extern unsigned int sysctl_nr_open;

typedef __kernel_rwf_t rwf_t;

struct buffer_head;
typedef int (get_block_t)(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
ssize_t bytes, void *private);
# 218 "./include/linux/fs.h"
struct iattr {
unsigned int ia_valid;
umode_t ia_mode;
kuid_t ia_uid;
kgid_t ia_gid;
loff_t ia_size;
struct timespec64 ia_atime;
struct timespec64 ia_mtime;
struct timespec64 ia_ctime;






struct file *ia_file;
};





# 1 "./include/linux/quota.h" 1
# 42 "./include/linux/quota.h"
# 1 "./include/uapi/linux/dqblk_xfs.h" 1
# 53 "./include/uapi/linux/dqblk_xfs.h"
typedef struct fs_disk_quota {
__s8 d_version;
__s8 d_flags;
__u16 d_fieldmask;
__u32 d_id;
__u64 d_blk_hardlimit;
__u64 d_blk_softlimit;
__u64 d_ino_hardlimit;
__u64 d_ino_softlimit;
__u64 d_bcount;
__u64 d_icount;
__s32 d_itimer;


__s32 d_btimer;
__u16 d_iwarns;
__u16 d_bwarns;
__s8 d_itimer_hi;
__s8 d_btimer_hi;
__s8 d_rtbtimer_hi;
__s8 d_padding2;
__u64 d_rtb_hardlimit;
__u64 d_rtb_softlimit;
__u64 d_rtbcount;
__s32 d_rtbtimer;
__u16 d_rtbwarns;
__s16 d_padding3;
char d_padding4[8];
} fs_disk_quota_t;
# 159 "./include/uapi/linux/dqblk_xfs.h"
typedef struct fs_qfilestat {
__u64 qfs_ino;
__u64 qfs_nblks;
__u32 qfs_nextents;
} fs_qfilestat_t;

typedef struct fs_quota_stat {
__s8 qs_version;
__u16 qs_flags;
__s8 qs_pad;
fs_qfilestat_t qs_uquota;
fs_qfilestat_t qs_gquota;
__u32 qs_incoredqs;
__s32 qs_btimelimit;
__s32 qs_itimelimit;
__s32 qs_rtbtimelimit;
__u16 qs_bwarnlimit;
__u16 qs_iwarnlimit;
} fs_quota_stat_t;
# 202 "./include/uapi/linux/dqblk_xfs.h"
struct fs_qfilestatv {
__u64 qfs_ino;
__u64 qfs_nblks;
__u32 qfs_nextents;
__u32 qfs_pad;
};

struct fs_quota_statv {
__s8 qs_version;
__u8 qs_pad1;
__u16 qs_flags;
__u32 qs_incoredqs;
struct fs_qfilestatv qs_uquota;
struct fs_qfilestatv qs_gquota;
struct fs_qfilestatv qs_pquota;
__s32 qs_btimelimit;
__s32 qs_itimelimit;
__s32 qs_rtbtimelimit;
__u16 qs_bwarnlimit;
__u16 qs_iwarnlimit;
__u16 qs_rtbwarnlimit;
__u16 qs_pad3;
__u32 qs_pad4;
__u64 qs_pad2[7];
};
# 43 "./include/linux/quota.h" 2
# 1 "./include/linux/dqblk_v1.h" 1
# 44 "./include/linux/quota.h" 2
# 1 "./include/linux/dqblk_v2.h" 1








# 1 "./include/linux/dqblk_qtree.h" 1
# 18 "./include/linux/dqblk_qtree.h"
struct dquot;
struct kqid;


struct qtree_fmt_operations {
void (*mem2disk_dqblk)(void *disk, struct dquot *dquot);
void (*disk2mem_dqblk)(struct dquot *dquot, void *disk);
int (*is_id)(void *disk, struct dquot *dquot);
};


struct qtree_mem_dqinfo {
struct super_block *dqi_sb;
int dqi_type;
unsigned int dqi_blocks;
unsigned int dqi_free_blk;
unsigned int dqi_free_entry;
unsigned int dqi_blocksize_bits;
unsigned int dqi_entry_size;
unsigned int dqi_usable_bs;
unsigned int dqi_qtree_depth;
const struct qtree_fmt_operations *dqi_ops;
};

int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int qtree_depth(struct qtree_mem_dqinfo *info)
{
unsigned int epb = info->dqi_usable_bs >> 2;
unsigned long long entries = epb;
int i;

for (i = 1; entries < (1ULL << 32); i++)
entries *= epb;
return i;
}
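/*
 * qtree_depth(): with epb ids per tree block (dqi_usable_bs / 4 block
 * references), the loop returns the smallest depth i such that epb^i
 * covers the full 32-bit qid space, i.e. ceil(log_epb(2^32)).
 */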
int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid);
# 10 "./include/linux/dqblk_v2.h" 2
# 45 "./include/linux/quota.h" 2



# 1 "./include/linux/projid.h" 1
# 17 "./include/linux/projid.h"
struct user_namespace;
extern struct user_namespace init_user_ns;

typedef __kernel_uid32_t projid_t;

typedef struct {
projid_t val;
} kprojid_t;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) projid_t __kprojid_val(kprojid_t projid)
{
return projid.val;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool projid_eq(kprojid_t left, kprojid_t right)
{
return __kprojid_val(left) == __kprojid_val(right);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool projid_lt(kprojid_t left, kprojid_t right)
{
return __kprojid_val(left) < __kprojid_val(right);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool projid_valid(kprojid_t projid)
{
return !projid_eq(projid, (kprojid_t){ -1 });
}



extern kprojid_t make_kprojid(struct user_namespace *from, projid_t projid);

extern projid_t from_kprojid(struct user_namespace *to, kprojid_t projid);
extern projid_t from_kprojid_munged(struct user_namespace *to, kprojid_t projid);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool kprojid_has_mapping(struct user_namespace *ns, kprojid_t projid)
{
return from_kprojid(ns, projid) != (projid_t)-1;
}
# 49 "./include/linux/quota.h" 2
# 1 "./include/uapi/linux/quota.h" 1
# 90 "./include/uapi/linux/quota.h"
enum {
QIF_BLIMITS_B = 0,
QIF_SPACE_B,
QIF_ILIMITS_B,
QIF_INODES_B,
QIF_BTIME_B,
QIF_ITIME_B,
};
# 110 "./include/uapi/linux/quota.h"
struct if_dqblk {
__u64 dqb_bhardlimit;
__u64 dqb_bsoftlimit;
__u64 dqb_curspace;
__u64 dqb_ihardlimit;
__u64 dqb_isoftlimit;
__u64 dqb_curinodes;
__u64 dqb_btime;
__u64 dqb_itime;
__u32 dqb_valid;
};

struct if_nextdqblk {
__u64 dqb_bhardlimit;
__u64 dqb_bsoftlimit;
__u64 dqb_curspace;
__u64 dqb_ihardlimit;
__u64 dqb_isoftlimit;
__u64 dqb_curinodes;
__u64 dqb_btime;
__u64 dqb_itime;
__u32 dqb_valid;
__u32 dqb_id;
};
# 144 "./include/uapi/linux/quota.h"
enum {
DQF_ROOT_SQUASH_B = 0,
DQF_SYS_FILE_B = 16,

DQF_PRIVATE
};






struct if_dqinfo {
__u64 dqi_bgrace;
__u64 dqi_igrace;
__u32 dqi_flags;
__u32 dqi_valid;
};
# 178 "./include/uapi/linux/quota.h"
enum {
QUOTA_NL_C_UNSPEC,
QUOTA_NL_C_WARNING,
__QUOTA_NL_C_MAX,
};


enum {
QUOTA_NL_A_UNSPEC,
QUOTA_NL_A_QTYPE,
QUOTA_NL_A_EXCESS_ID,
QUOTA_NL_A_WARNING,
QUOTA_NL_A_DEV_MAJOR,
QUOTA_NL_A_DEV_MINOR,
QUOTA_NL_A_CAUSED_ID,
QUOTA_NL_A_PAD,
__QUOTA_NL_A_MAX,
};
# 50 "./include/linux/quota.h" 2




enum quota_type {
USRQUOTA = 0,
GRPQUOTA = 1,
PRJQUOTA = 2,
};






typedef __kernel_uid32_t qid_t;
typedef long long qsize_t;

struct kqid {
union {
kuid_t uid;
kgid_t gid;
kprojid_t projid;
};
enum quota_type type;
};

extern bool qid_eq(struct kqid left, struct kqid right);
extern bool qid_lt(struct kqid left, struct kqid right);
extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
extern bool qid_valid(struct kqid qid);
# 97 "./include/linux/quota.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct kqid make_kqid(struct user_namespace *from,
enum quota_type type, qid_t qid)
{
struct kqid kqid;

kqid.type = type;
switch (type) {
case USRQUOTA:
kqid.uid = make_kuid(from, qid);
break;
case GRPQUOTA:
kqid.gid = make_kgid(from, qid);
break;
case PRJQUOTA:
kqid.projid = make_kprojid(from, qid);
break;
default:
do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/quota.h"), "i" (114), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0);
}
return kqid;
}
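/*
 * make_kqid() converts a userspace qid_t into a kernel struct kqid via
 * the matching make_kuid/make_kgid/make_kprojid translation for the
 * given user namespace.  The inline asm in the default arm is the riscv
 * BUG() expansion: an ebreak plus a __bug_table entry.
 */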







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct kqid make_kqid_invalid(enum quota_type type)
{
struct kqid kqid;

kqid.type = type;
switch (type) {
case USRQUOTA:
kqid.uid = (kuid_t){ -1 };
break;
case GRPQUOTA:
kqid.gid = (kgid_t){ -1 };
break;
case PRJQUOTA:
kqid.projid = (kprojid_t){ -1 };
break;
default:
do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/quota.h"), "i" (141), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0);
}
return kqid;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct kqid make_kqid_uid(kuid_t uid)
{
struct kqid kqid;
kqid.type = USRQUOTA;
kqid.uid = uid;
return kqid;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct kqid make_kqid_gid(kgid_t gid)
{
struct kqid kqid;
kqid.type = GRPQUOTA;
kqid.gid = gid;
return kqid;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct kqid make_kqid_projid(kprojid_t projid)
{
struct kqid kqid;
kqid.type = PRJQUOTA;
kqid.projid = projid;
return kqid;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool qid_has_mapping(struct user_namespace *ns, struct kqid qid)
{
return from_kqid(ns, qid) != (qid_t) -1;
}


extern spinlock_t dq_data_lock;
# 205 "./include/linux/quota.h"
struct mem_dqblk {
qsize_t dqb_bhardlimit;
qsize_t dqb_bsoftlimit;
qsize_t dqb_curspace;
qsize_t dqb_rsvspace;
qsize_t dqb_ihardlimit;
qsize_t dqb_isoftlimit;
qsize_t dqb_curinodes;
time64_t dqb_btime;
time64_t dqb_itime;
};




struct quota_format_type;

struct mem_dqinfo {
struct quota_format_type *dqi_format;
int dqi_fmt_id;

struct list_head dqi_dirty_list;
unsigned long dqi_flags;
unsigned int dqi_bgrace;
unsigned int dqi_igrace;
qsize_t dqi_max_spc_limit;
qsize_t dqi_max_ino_limit;
void *dqi_priv;
};

struct super_block;






enum {
DQF_INFO_DIRTY_B = DQF_PRIVATE,
};


extern void mark_info_dirty(struct super_block *sb, int type);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int info_dirty(struct mem_dqinfo *info)
{
return arch_test_bit(DQF_INFO_DIRTY_B, &info->dqi_flags);
}

enum {
DQST_LOOKUPS,
DQST_DROPS,
DQST_READS,
DQST_WRITES,
DQST_CACHE_HITS,
DQST_ALLOC_DQUOTS,
DQST_FREE_DQUOTS,
DQST_SYNCS,
_DQST_DQSTAT_LAST
};

struct dqstats {
unsigned long stat[_DQST_DQSTAT_LAST];
struct percpu_counter counter[_DQST_DQSTAT_LAST];
};

extern struct dqstats dqstats;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dqstats_inc(unsigned int type)
{
percpu_counter_inc(&dqstats.counter[type]);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dqstats_dec(unsigned int type)
{
percpu_counter_dec(&dqstats.counter[type]);
}
# 294 "./include/linux/quota.h"
struct dquot {
struct hlist_node dq_hash;
struct list_head dq_inuse;
struct list_head dq_free;
struct list_head dq_dirty;
struct mutex dq_lock;
spinlock_t dq_dqb_lock;
atomic_t dq_count;
struct super_block *dq_sb;
struct kqid dq_id;
loff_t dq_off;
unsigned long dq_flags;
struct mem_dqblk dq_dqb;
};


struct quota_format_ops {
int (*check_quota_file)(struct super_block *sb, int type);
int (*read_file_info)(struct super_block *sb, int type);
int (*write_file_info)(struct super_block *sb, int type);
int (*free_file_info)(struct super_block *sb, int type);
int (*read_dqblk)(struct dquot *dquot);
int (*commit_dqblk)(struct dquot *dquot);
int (*release_dqblk)(struct dquot *dquot);
int (*get_next_id)(struct super_block *sb, struct kqid *qid);
};


struct dquot_operations {
int (*write_dquot) (struct dquot *);
struct dquot *(*alloc_dquot)(struct super_block *, int);
void (*destroy_dquot)(struct dquot *);
int (*acquire_dquot) (struct dquot *);
int (*release_dquot) (struct dquot *);
int (*mark_dirty) (struct dquot *);
int (*write_info) (struct super_block *, int);


qsize_t *(*get_reserved_space) (struct inode *);
int (*get_projid) (struct inode *, kprojid_t *);

int (*get_inode_usage) (struct inode *, qsize_t *);

int (*get_next_id) (struct super_block *sb, struct kqid *qid);
};

struct path;


struct qc_dqblk {
int d_fieldmask;
u64 d_spc_hardlimit;
u64 d_spc_softlimit;
u64 d_ino_hardlimit;
u64 d_ino_softlimit;
u64 d_space;
u64 d_ino_count;
s64 d_ino_timer;

s64 d_spc_timer;
int d_ino_warns;
int d_spc_warns;
u64 d_rt_spc_hardlimit;
u64 d_rt_spc_softlimit;
u64 d_rt_space;
s64 d_rt_spc_timer;
int d_rt_spc_warns;
};
# 395 "./include/linux/quota.h"
struct qc_type_state {
unsigned int flags;
unsigned int spc_timelimit;

unsigned int ino_timelimit;
unsigned int rt_spc_timelimit;
unsigned int spc_warnlimit;
unsigned int ino_warnlimit;
unsigned int rt_spc_warnlimit;
unsigned long long ino;
blkcnt_t blocks;
blkcnt_t nextents;
};

struct qc_state {
unsigned int s_incoredqs;
struct qc_type_state s_state[3];
};


struct qc_info {
int i_fieldmask;
unsigned int i_flags;
unsigned int i_spc_timelimit;

unsigned int i_ino_timelimit;
unsigned int i_rt_spc_timelimit;
unsigned int i_spc_warnlimit;
unsigned int i_ino_warnlimit;
unsigned int i_rt_spc_warnlimit;
};


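/* Method table behind the quotactl(2) system call: turning quotas on and
 * off, syncing them, and getting/setting per-id limits and global state. */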
struct quotactl_ops {
int (*quota_on)(struct super_block *, int, int, const struct path *);
int (*quota_off)(struct super_block *, int);
int (*quota_enable)(struct super_block *, unsigned int);
int (*quota_disable)(struct super_block *, unsigned int);
int (*quota_sync)(struct super_block *, int);
int (*set_info)(struct super_block *, int, struct qc_info *);
int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
int (*get_nextdqblk)(struct super_block *, struct kqid *,
struct qc_dqblk *);
int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
int (*get_state)(struct super_block *, struct qc_state *);
int (*rm_xquota)(struct super_block *, unsigned int);
};

struct quota_format_type {
int qf_fmt_id;
const struct quota_format_ops *qf_ops;
struct module *qf_owner;
struct quota_format_type *qf_next;
};
# 464 "./include/linux/quota.h"
enum {
_DQUOT_USAGE_ENABLED = 0,
_DQUOT_LIMITS_ENABLED,
_DQUOT_SUSPENDED,


_DQUOT_STATE_FLAGS
};
# 491 "./include/linux/quota.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int dquot_state_flag(unsigned int flags, int type)
{
return flags << type;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int dquot_generic_flag(unsigned int flags, int type)
{
return (flags >> type) & ((1 << _DQUOT_USAGE_ENABLED * 3) | (1 << _DQUOT_LIMITS_ENABLED * 3) | (1 << _DQUOT_SUSPENDED * 3));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned dquot_state_types(unsigned flags, unsigned flag)
{
do { __attribute__((__noreturn__)) extern void __compiletime_assert_199(void) ; if (!(!((flag) == 0 || (((flag) & ((flag) - 1)) != 0)))) __compiletime_assert_199(); } while (0);
return (flags / flag) & ((1 << 3) - 1);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void quota_send_warning(struct kqid qid, dev_t dev,
const char warntype)
{
return;
}


struct quota_info {
unsigned int flags;
struct rw_semaphore dqio_sem;
struct inode *files[3];
struct mem_dqinfo info[3];
const struct quota_format_ops *ops[3];
};

int register_quota_format(struct quota_format_type *fmt);
void unregister_quota_format(struct quota_format_type *fmt);

struct quota_module_name {
int qm_fmt_id;
char *qm_mod_name;
};
# 240 "./include/linux/fs.h" 2
# 273 "./include/linux/fs.h"
enum positive_aop_returns {
AOP_WRITEPAGE_ACTIVATE = 0x80000,
AOP_TRUNCATED_PAGE = 0x80001,
};
# 285 "./include/linux/fs.h"
struct page;
struct address_space;
struct writeback_control;
struct readahead_control;





enum rw_hint {
WRITE_LIFE_NOT_SET = 0,
WRITE_LIFE_NONE = 1,
WRITE_LIFE_SHORT = 2,
WRITE_LIFE_MEDIUM = 3,
WRITE_LIFE_LONG = 4,
WRITE_LIFE_EXTREME = 5,
};
# 320 "./include/linux/fs.h"
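/* struct kiocb: kernel I/O control block for one in-flight read or write;
 * ki_complete is set only for async requests (cf. is_sync_kiocb() below). */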
struct kiocb {
struct file *ki_filp;




loff_t ki_pos;
void (*ki_complete)(struct kiocb *iocb, long ret);
void *private;
int ki_flags;
u16 ki_ioprio;
struct wait_page_queue *ki_waitq;

};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_sync_kiocb(struct kiocb *kiocb)
{
return kiocb->ki_complete == ((void *)0);
}

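/* Per-address-space methods connecting the page cache to the filesystem:
 * writeback, readahead, buffered write begin/end, direct I/O and page
 * migration. */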
struct address_space_operations {
int (*writepage)(struct page *page, struct writeback_control *wbc);
int (*readpage)(struct file *, struct page *);


int (*writepages)(struct address_space *, struct writeback_control *);


bool (*dirty_folio)(struct address_space *, struct folio *);

void (*readahead)(struct readahead_control *);

int (*write_begin)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata);
int (*write_end)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);


sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidate_folio) (struct folio *, size_t offset, size_t len);
int (*releasepage) (struct page *, gfp_t);
void (*freepage)(struct page *);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);




int (*migratepage) (struct address_space *,
struct page *, struct page *, enum migrate_mode);
bool (*isolate_page)(struct page *, isolate_mode_t);
void (*putback_page)(struct page *);
int (*launder_folio)(struct folio *);
bool (*is_partially_uptodate) (struct folio *, size_t from,
size_t count);
void (*is_dirty_writeback) (struct page *, bool *, bool *);
int (*error_remove_page)(struct address_space *, struct page *);


int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
sector_t *span);
void (*swap_deactivate)(struct file *file);
};

extern const struct address_space_operations empty_aops;





int pagecache_write_begin(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata);

int pagecache_write_end(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
# 421 "./include/linux/fs.h"
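/* struct address_space: the page cache of one file (the i_pages xarray)
 * plus the tree of VMAs mapping it (i_mmap) and writeback bookkeeping. */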
struct address_space {
struct inode *host;
struct xarray i_pages;
struct rw_semaphore invalidate_lock;
gfp_t gfp_mask;
atomic_t i_mmap_writable;




struct rb_root_cached i_mmap;
struct rw_semaphore i_mmap_rwsem;
unsigned long nrpages;
unsigned long writeback_index;
const struct address_space_operations *a_ops;
unsigned long flags;
errseq_t wb_err;
spinlock_t private_lock;
struct list_head private_list;
void *private_data;
} __attribute__((aligned(sizeof(long)))) ;
# 456 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mapping_tagged(struct address_space *mapping, xa_mark_t tag)
{
return xa_marked(&mapping->i_pages, tag);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void i_mmap_lock_write(struct address_space *mapping)
{
down_write(&mapping->i_mmap_rwsem);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int i_mmap_trylock_write(struct address_space *mapping)
{
return down_write_trylock(&mapping->i_mmap_rwsem);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void i_mmap_unlock_write(struct address_space *mapping)
{
up_write(&mapping->i_mmap_rwsem);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void i_mmap_lock_read(struct address_space *mapping)
{
down_read(&mapping->i_mmap_rwsem);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void i_mmap_unlock_read(struct address_space *mapping)
{
up_read(&mapping->i_mmap_rwsem);
}

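/* The single-line bodies of the two assert helpers below are the expanded
 * forms of lockdep_assert_held()/lockdep_assert_held_write(): a WARN_ON
 * whose trap is the RISC-V "ebreak" plus a __bug_table entry. */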
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void i_mmap_assert_locked(struct address_space *mapping)
{
do { ({ int __ret_warn_on = !!(debug_locks && !(lock_is_held(&(&mapping->i_mmap_rwsem)->dep_map) != 0)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/fs.h"), "i" (488), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void i_mmap_assert_write_locked(struct address_space *mapping)
{
do { ({ int __ret_warn_on = !!(debug_locks && !(lock_is_held_type(&(&mapping->i_mmap_rwsem)->dep_map, (0)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/fs.h"), "i" (493), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } while (0);
}




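/* The blob in mapping_mapped() is the expanded READ_ONCE() of the i_mmap
 * rbtree root: "does anyone currently have this file mmapped?". */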
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mapping_mapped(struct address_space *mapping)
{
return !(({
	do {
		__attribute__((__noreturn__)) extern void __compiletime_assert_200(void) ;
		if (!((sizeof((&mapping->i_mmap.rb_root)->rb_node) == sizeof(char) ||
		       sizeof((&mapping->i_mmap.rb_root)->rb_node) == sizeof(short) ||
		       sizeof((&mapping->i_mmap.rb_root)->rb_node) == sizeof(int) ||
		       sizeof((&mapping->i_mmap.rb_root)->rb_node) == sizeof(long)) ||
		      sizeof((&mapping->i_mmap.rb_root)->rb_node) == sizeof(long long)))
			__compiletime_assert_200();
	} while (0);
	(*(const volatile typeof( _Generic(((&mapping->i_mmap.rb_root)->rb_node),
		char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0,
		unsigned short: (unsigned short)0, signed short: (signed short)0,
		unsigned int: (unsigned int)0, signed int: (signed int)0,
		unsigned long: (unsigned long)0, signed long: (signed long)0,
		unsigned long long: (unsigned long long)0, signed long long: (signed long long)0,
		default: ((&mapping->i_mmap.rb_root)->rb_node))) *)&((&mapping->i_mmap.rb_root)->rb_node));
}) == ((void *)0));
}
# 513 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mapping_writably_mapped(struct address_space *mapping)
{
return atomic_read(&mapping->i_mmap_writable) > 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mapping_map_writable(struct address_space *mapping)
{
return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
0 : -1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mapping_unmap_writable(struct address_space *mapping)
{
atomic_dec(&mapping->i_mmap_writable);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mapping_deny_writable(struct address_space *mapping)
{
return atomic_dec_unless_positive(&mapping->i_mmap_writable) ?
0 : -16;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mapping_allow_writable(struct address_space *mapping)
{
atomic_inc(&mapping->i_mmap_writable);
}
# 551 "./include/linux/fs.h"
struct posix_acl;
# 560 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct posix_acl *
uncached_acl_sentinel(struct task_struct *task)
{
return (void *)task + 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
is_uncached_acl(struct posix_acl *acl)
{
return (long)acl & 1;
}







struct fsnotify_mark_connector;






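/* struct inode: the VFS inode, one per cached filesystem object. Holds
 * ownership, size, timestamps, state bits and the i_op/i_fop method
 * tables; embeds its own page cache in i_data. */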
struct inode {
umode_t i_mode;
unsigned short i_opflags;
kuid_t i_uid;
kgid_t i_gid;
unsigned int i_flags;


struct posix_acl *i_acl;
struct posix_acl *i_default_acl;


const struct inode_operations *i_op;
struct super_block *i_sb;
struct address_space *i_mapping;






unsigned long i_ino;







union {
const unsigned int i_nlink;
unsigned int __i_nlink;
};
dev_t i_rdev;
loff_t i_size;
struct timespec64 i_atime;
struct timespec64 i_mtime;
struct timespec64 i_ctime;
spinlock_t i_lock;
unsigned short i_bytes;
u8 i_blkbits;
u8 i_write_hint;
blkcnt_t i_blocks;






unsigned long i_state;
struct rw_semaphore i_rwsem;

unsigned long dirtied_when;
unsigned long dirtied_time_when;

struct hlist_node i_hash;
struct list_head i_io_list;
# 650 "./include/linux/fs.h"
struct list_head i_lru;
struct list_head i_sb_list;
struct list_head i_wb_list;
union {
struct hlist_head i_dentry;
struct callback_head i_rcu;
};
atomic64_t i_version;
atomic64_t i_sequence;
atomic_t i_count;
atomic_t i_dio_count;
atomic_t i_writecount;

atomic_t i_readcount;

union {
const struct file_operations *i_fop;
void (*free_inode)(struct inode *);
};
struct file_lock_context *i_flctx;
struct address_space i_data;
struct list_head i_devices;
union {
struct pipe_inode_info *i_pipe;
struct cdev *i_cdev;
char *i_link;
unsigned i_dir_seq;
};

__u32 i_generation;


__u32 i_fsnotify_mask;
struct fsnotify_mark_connector *i_fsnotify_marks;
# 694 "./include/linux/fs.h"
void *i_private;
} ;

struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int i_blocksize(const struct inode *node)
{
return (1 << node->i_blkbits);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int inode_unhashed(struct inode *inode)
{
return hlist_unhashed(&inode->i_hash);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inode_fake_hash(struct inode *inode)
{
hlist_add_fake(&inode->i_hash);
}
# 736 "./include/linux/fs.h"
enum inode_i_mutex_lock_class
{
I_MUTEX_NORMAL,
I_MUTEX_PARENT,
I_MUTEX_CHILD,
I_MUTEX_XATTR,
I_MUTEX_NONDIR2,
I_MUTEX_PARENT2,
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inode_lock(struct inode *inode)
{
down_write(&inode->i_rwsem);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inode_unlock(struct inode *inode)
{
up_write(&inode->i_rwsem);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inode_lock_shared(struct inode *inode)
{
down_read(&inode->i_rwsem);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inode_unlock_shared(struct inode *inode)
{
up_read(&inode->i_rwsem);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int inode_trylock(struct inode *inode)
{
return down_write_trylock(&inode->i_rwsem);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int inode_trylock_shared(struct inode *inode)
{
return down_read_trylock(&inode->i_rwsem);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int inode_is_locked(struct inode *inode)
{
return rwsem_is_locked(&inode->i_rwsem);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inode_lock_nested(struct inode *inode, unsigned subclass)
{
down_write_nested(&inode->i_rwsem, subclass);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inode_lock_shared_nested(struct inode *inode, unsigned subclass)
{
down_read_nested(&inode->i_rwsem, subclass);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void filemap_invalidate_lock(struct address_space *mapping)
{
down_write(&mapping->invalidate_lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void filemap_invalidate_unlock(struct address_space *mapping)
{
up_write(&mapping->invalidate_lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void filemap_invalidate_lock_shared(struct address_space *mapping)
{
down_read(&mapping->invalidate_lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int filemap_invalidate_trylock_shared(
struct address_space *mapping)
{
return down_read_trylock(&mapping->invalidate_lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void filemap_invalidate_unlock_shared(
struct address_space *mapping)
{
up_read(&mapping->invalidate_lock);
}

void lock_two_nondirectories(struct inode *, struct inode*);
void unlock_two_nondirectories(struct inode *, struct inode*);

void filemap_invalidate_lock_two(struct address_space *mapping1,
struct address_space *mapping2);
void filemap_invalidate_unlock_two(struct address_space *mapping1,
struct address_space *mapping2);
# 837 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) loff_t i_size_read(const struct inode *inode)
{
# 856 "./include/linux/fs.h"
return inode->i_size;

}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void i_size_write(struct inode *inode, loff_t i_size)
{
# 878 "./include/linux/fs.h"
inode->i_size = i_size;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned iminor(const struct inode *inode)
{
return ((unsigned int) ((inode->i_rdev) & ((1U << 20) - 1)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned imajor(const struct inode *inode)
{
return ((unsigned int) ((inode->i_rdev) >> 20));
}

struct fown_struct {
rwlock_t lock;
struct pid *pid;
enum pid_type pid_type;
kuid_t uid, euid;
int signum;
};
# 914 "./include/linux/fs.h"
struct file_ra_state {
unsigned long start;
unsigned int size;
unsigned int async_size;
unsigned int ra_pages;
unsigned int mmap_miss;
loff_t prev_pos;
};




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ra_has_index(struct file_ra_state *ra, unsigned long index)
{
return (index >= ra->start &&
index < ra->start + ra->size);
}

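/* struct file: one open file description, carrying position (f_pos),
 * flags, mode and credentials; reference-counted via f_count, see
 * get_file() below. */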
struct file {
union {
struct llist_node fu_llist;
struct callback_head fu_rcuhead;
} f_u;
struct path f_path;
struct inode *f_inode;
const struct file_operations *f_op;





spinlock_t f_lock;
atomic_long_t f_count;
unsigned int f_flags;
fmode_t f_mode;
struct mutex f_pos_lock;
loff_t f_pos;
struct fown_struct f_owner;
const struct cred *f_cred;
struct file_ra_state f_ra;

u64 f_version;




void *private_data;



struct hlist_head *f_ep;

struct address_space *f_mapping;
errseq_t f_wb_err;
errseq_t f_sb_err;
}
__attribute__((aligned(4)));

struct file_handle {
__u32 handle_bytes;
int handle_type;

unsigned char f_handle[];
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct file *get_file(struct file *f)
{
atomic_long_inc(&f->f_count);
return f;
}
# 1022 "./include/linux/fs.h"
typedef void *fl_owner_t;

struct file_lock;

struct file_lock_operations {
void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
void (*fl_release_private)(struct file_lock *);
};

struct lock_manager_operations {
fl_owner_t (*lm_get_owner)(fl_owner_t);
void (*lm_put_owner)(fl_owner_t);
void (*lm_notify)(struct file_lock *);
int (*lm_grant)(struct file_lock *, int);
bool (*lm_break)(struct file_lock *);
int (*lm_change)(struct file_lock *, int, struct list_head *);
void (*lm_setup)(struct file_lock *, void **);
bool (*lm_breaker_owns_lease)(struct file_lock *);
};

struct lock_manager {
struct list_head list;




bool block_opens;
};

struct net;
void locks_start_grace(struct net *, struct lock_manager *);
void locks_end_grace(struct lock_manager *);
bool locks_in_grace(struct net *);
bool opens_in_grace(struct net *);



# 1 "./include/linux/nfs_fs_i.h" 1




struct nlm_lockowner;




struct nfs_lock_info {
u32 state;
struct nlm_lockowner *owner;
struct list_head list;
};

struct nfs4_lock_state;
struct nfs4_lock_info {
struct nfs4_lock_state *owner;
};
# 1059 "./include/linux/fs.h" 2
# 1077 "./include/linux/fs.h"
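/* struct file_lock: one advisory lock (POSIX, flock or lease) on a file;
 * fl_blocker and the fl_blocked_* lists chain waiters behind whichever
 * lock currently conflicts with them. */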
struct file_lock {
struct file_lock *fl_blocker;
struct list_head fl_list;
struct hlist_node fl_link;
struct list_head fl_blocked_requests;


struct list_head fl_blocked_member;


fl_owner_t fl_owner;
unsigned int fl_flags;
unsigned char fl_type;
unsigned int fl_pid;
int fl_link_cpu;
wait_queue_head_t fl_wait;
struct file *fl_file;
loff_t fl_start;
loff_t fl_end;

struct fasync_struct * fl_fasync;

unsigned long fl_break_time;
unsigned long fl_downgrade_time;

const struct file_lock_operations *fl_ops;
const struct lock_manager_operations *fl_lmops;
union {
struct nfs_lock_info nfs_fl;
struct nfs4_lock_info nfs4_fl;
struct {
struct list_head link;
int state;
unsigned int debug_id;
} afs;
} fl_u;
} ;

struct file_lock_context {
spinlock_t flc_lock;
struct list_head flc_flock;
struct list_head flc_posix;
struct list_head flc_lease;
};
# 1129 "./include/linux/fs.h"
extern void send_sigio(struct fown_struct *fown, int fd, int band);




extern int fcntl_getlk(struct file *, unsigned int, struct flock *);
extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
struct flock *);







extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
extern int fcntl_getlease(struct file *filp);


void locks_free_lock_context(struct inode *inode);
void locks_free_lock(struct file_lock *fl);
extern void locks_init_lock(struct file_lock *);
extern struct file_lock * locks_alloc_lock(void);
extern void locks_copy_lock(struct file_lock *, struct file_lock *);
extern void locks_copy_conflock(struct file_lock *, struct file_lock *);
extern void locks_remove_posix(struct file *, fl_owner_t);
extern void locks_remove_file(struct file *);
extern void locks_release_private(struct file_lock *);
extern void posix_test_lock(struct file *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
extern int locks_delete_block(struct file_lock *);
extern int vfs_test_lock(struct file *, struct file_lock *);
extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
extern void lease_get_mtime(struct inode *, struct timespec64 *time);
extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
extern int lease_modify(struct file_lock *, int, struct list_head *);

struct notifier_block;
extern int lease_register_notifier(struct notifier_block *);
extern void lease_unregister_notifier(struct notifier_block *);

struct files_struct;
extern void show_fd_locks(struct seq_file *f,
struct file *filp, struct files_struct *files);
# 1314 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct inode *file_inode(const struct file *f)
{
return f->f_inode;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct dentry *file_dentry(const struct file *file)
{
return d_real(file->f_path.dentry, file_inode(file));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
{
return locks_lock_inode_wait(file_inode(filp), fl);
}

struct fasync_struct {
rwlock_t fa_lock;
int magic;
int fa_fd;
struct fasync_struct *fa_next;
struct file *fa_file;
struct callback_head fa_rcu;
};




extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *);
extern int fasync_remove_entry(struct file *, struct fasync_struct **);
extern struct fasync_struct *fasync_alloc(void);
extern void fasync_free(struct fasync_struct *);


extern void kill_fasync(struct fasync_struct **, int, int);

extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
extern int f_setown(struct file *filp, unsigned long arg, int force);
extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
extern int send_sigurg(struct fown_struct *fown);
# 1416 "./include/linux/fs.h"
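/* Superblock freeze levels: writers are stopped in stages, first ordinary
 * writes, then page-fault writers, then the filesystem's internal writes.
 * sb_writers below keeps one percpu rwsem per stage. */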
enum {
SB_UNFROZEN = 0,
SB_FREEZE_WRITE = 1,
SB_FREEZE_PAGEFAULT = 2,
SB_FREEZE_FS = 3,

SB_FREEZE_COMPLETE = 4,
};



struct sb_writers {
int frozen;
wait_queue_head_t wait_unfrozen;
struct percpu_rw_semaphore rw_sem[(SB_FREEZE_COMPLETE - 1)];
};

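/* struct super_block: one per mounted filesystem instance. Ties together
 * the block size and flags, the root dentry, the method tables (s_op,
 * dq_op, s_qcop, s_export_op), quota state (s_dquot) and the freeze
 * machinery (s_writers). */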
struct super_block {
struct list_head s_list;
dev_t s_dev;
unsigned char s_blocksize_bits;
unsigned long s_blocksize;
loff_t s_maxbytes;
struct file_system_type *s_type;
const struct super_operations *s_op;
const struct dquot_operations *dq_op;
const struct quotactl_ops *s_qcop;
const struct export_operations *s_export_op;
unsigned long s_flags;
unsigned long s_iflags;
unsigned long s_magic;
struct dentry *s_root;
struct rw_semaphore s_umount;
int s_count;
atomic_t s_active;



const struct xattr_handler **s_xattr;
# 1466 "./include/linux/fs.h"
struct hlist_bl_head s_roots;
struct list_head s_mounts;
struct block_device *s_bdev;
struct backing_dev_info *s_bdi;
struct mtd_info *s_mtd;
struct hlist_node s_instances;
unsigned int s_quota_types;
struct quota_info s_dquot;

struct sb_writers s_writers;






void *s_fs_info;


u32 s_time_gran;

time64_t s_time_min;
time64_t s_time_max;

__u32 s_fsnotify_mask;
struct fsnotify_mark_connector *s_fsnotify_marks;


char s_id[32];
uuid_t s_uuid;

unsigned int s_max_links;
fmode_t s_mode;





struct mutex s_vfs_rename_mutex;





const char *s_subtype;

const struct dentry_operations *s_d_op;

struct shrinker s_shrink;


atomic_long_t s_remove_count;





atomic_long_t s_fsnotify_connectors;


int s_readonly_remount;


errseq_t s_wb_err;


struct workqueue_struct *s_dio_done_wq;
struct hlist_head s_pins;






struct user_namespace *s_user_ns;






struct list_lru s_dentry_lru;
struct list_lru s_inode_lru;
struct callback_head rcu;
struct work_struct destroy_work;

struct mutex s_sync_lock;




int s_stack_depth;


spinlock_t s_inode_list_lock __attribute__((__aligned__((1 << 6))));
struct list_head s_inodes;

spinlock_t s_inode_wblist_lock;
struct list_head s_inodes_wb;
} ;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct user_namespace *i_user_ns(const struct inode *inode)
{
return inode->i_sb->s_user_ns;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) uid_t i_uid_read(const struct inode *inode)
{
return from_kuid(i_user_ns(inode), inode->i_uid);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) gid_t i_gid_read(const struct inode *inode)
{
return from_kgid(i_user_ns(inode), inode->i_gid);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void i_uid_write(struct inode *inode, uid_t uid)
{
inode->i_uid = make_kuid(i_user_ns(inode), uid);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void i_gid_write(struct inode *inode, gid_t gid)
{
inode->i_gid = make_kgid(i_user_ns(inode), gid);
}
# 1605 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) kuid_t i_uid_into_mnt(struct user_namespace *mnt_userns,
const struct inode *inode)
{
return mapped_kuid_fs(mnt_userns, i_user_ns(inode), inode->i_uid);
}
# 1619 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) kgid_t i_gid_into_mnt(struct user_namespace *mnt_userns,
const struct inode *inode)
{
return mapped_kgid_fs(mnt_userns, i_user_ns(inode), inode->i_gid);
}
# 1633 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inode_fsuid_set(struct inode *inode,
struct user_namespace *mnt_userns)
{
inode->i_uid = mapped_fsuid(mnt_userns, i_user_ns(inode));
}
# 1647 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inode_fsgid_set(struct inode *inode,
struct user_namespace *mnt_userns)
{
inode->i_gid = mapped_fsgid(mnt_userns, i_user_ns(inode));
}
# 1664 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fsuidgid_has_mapping(struct super_block *sb,
struct user_namespace *mnt_userns)
{
struct user_namespace *fs_userns = sb->s_user_ns;
kuid_t kuid;
kgid_t kgid;

kuid = mapped_fsuid(mnt_userns, fs_userns);
if (!uid_valid(kuid))
return false;
kgid = mapped_fsgid(mnt_userns, fs_userns);
if (!gid_valid(kgid))
return false;
return kuid_has_mapping(fs_userns, kuid) &&
kgid_has_mapping(fs_userns, kgid);
}

extern struct timespec64 current_time(struct inode *inode);
# 1691 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __sb_end_write(struct super_block *sb, int level)
{
percpu_up_read(sb->s_writers.rw_sem + level-1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __sb_start_write(struct super_block *sb, int level)
{
percpu_down_read(sb->s_writers.rw_sem + level - 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __sb_start_write_trylock(struct super_block *sb, int level)
{
return percpu_down_read_trylock(sb->s_writers.rw_sem + level - 1);
}
# 1718 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sb_end_write(struct super_block *sb)
{
__sb_end_write(sb, SB_FREEZE_WRITE);
}
# 1730 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sb_end_pagefault(struct super_block *sb)
{
__sb_end_write(sb, SB_FREEZE_PAGEFAULT);
}
# 1742 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sb_end_intwrite(struct super_block *sb)
{
__sb_end_write(sb, SB_FREEZE_FS);
}
# 1766 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sb_start_write(struct super_block *sb)
{
__sb_start_write(sb, SB_FREEZE_WRITE);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sb_start_write_trylock(struct super_block *sb)
{
return __sb_start_write_trylock(sb, SB_FREEZE_WRITE);
}
# 1795 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sb_start_pagefault(struct super_block *sb)
{
__sb_start_write(sb, SB_FREEZE_PAGEFAULT);
}
# 1813 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sb_start_intwrite(struct super_block *sb)
{
__sb_start_write(sb, SB_FREEZE_FS);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sb_start_intwrite_trylock(struct super_block *sb)
{
return __sb_start_write_trylock(sb, SB_FREEZE_FS);
}

bool inode_owner_or_capable(struct user_namespace *mnt_userns,
const struct inode *inode);




int vfs_create(struct user_namespace *, struct inode *,
struct dentry *, umode_t, bool);
int vfs_mkdir(struct user_namespace *, struct inode *,
struct dentry *, umode_t);
int vfs_mknod(struct user_namespace *, struct inode *, struct dentry *,
umode_t, dev_t);
int vfs_symlink(struct user_namespace *, struct inode *,
struct dentry *, const char *);
int vfs_link(struct dentry *, struct user_namespace *, struct inode *,
struct dentry *, struct inode **);
int vfs_rmdir(struct user_namespace *, struct inode *, struct dentry *);
int vfs_unlink(struct user_namespace *, struct inode *, struct dentry *,
struct inode **);
# 1854 "./include/linux/fs.h"
struct renamedata {
struct user_namespace *old_mnt_userns;
struct inode *old_dir;
struct dentry *old_dentry;
struct user_namespace *new_mnt_userns;
struct inode *new_dir;
struct dentry *new_dentry;
struct inode **delegated_inode;
unsigned int flags;
} ;

int vfs_rename(struct renamedata *);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int vfs_whiteout(struct user_namespace *mnt_userns,
struct inode *dir, struct dentry *dentry)
{
return vfs_mknod(mnt_userns, dir, dentry, 0020000 | 0,
0);
}

struct dentry *vfs_tmpfile(struct user_namespace *mnt_userns,
struct dentry *dentry, umode_t mode, int open_flag);

int vfs_mkobj(struct dentry *, umode_t,
int (*f)(struct dentry *, umode_t, void *),
void *);

int vfs_fchown(struct file *file, uid_t user, gid_t group);
int vfs_fchmod(struct file *file, umode_t mode);
int vfs_utimes(const struct path *path, struct timespec64 *times);

extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
# 1897 "./include/linux/fs.h"
void inode_init_owner(struct user_namespace *mnt_userns, struct inode *inode,
const struct inode *dir, umode_t mode);
extern bool may_open_dev(const struct path *path);







struct dir_context;
typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
unsigned);

struct dir_context {
filldir_t actor;
loff_t pos;
};
# 1955 "./include/linux/fs.h"
struct iov_iter;

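/* struct file_operations: the method table reached through every file
 * descriptor: read/write (plain and iter-based), mmap, poll, ioctl,
 * locking, splice and fallocate. */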
struct file_operations {
struct module *owner;
loff_t (*llseek) (struct file *, loff_t, int);
ssize_t (*read) (struct file *, char *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char *, size_t, loff_t *);
ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
int (*iopoll)(struct kiocb *kiocb, struct io_comp_batch *,
unsigned int flags);
int (*iterate) (struct file *, struct dir_context *);
int (*iterate_shared) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
unsigned long mmap_supported_flags;
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
int (*fsync) (struct file *, loff_t, loff_t, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
int (*check_flags)(int);
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
int (*setlease)(struct file *, long, struct file_lock **, void **);
long (*fallocate)(struct file *file, int mode, loff_t offset,
loff_t len);
void (*show_fdinfo)(struct seq_file *m, struct file *f);



ssize_t (*copy_file_range)(struct file *, loff_t, struct file *,
loff_t, size_t, unsigned int);
loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
int (*fadvise)(struct file *, loff_t, loff_t, int);
} ;

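/* struct inode_operations: namespace and metadata operations on inodes
 * (lookup, create, link, rename, getattr/setattr, ACLs); aligned to a
 * 64-byte cacheline. */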
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *);
int (*permission) (struct user_namespace *, struct inode *, int);
struct posix_acl * (*get_acl)(struct inode *, int, bool);

int (*readlink) (struct dentry *, char *,int);

int (*create) (struct user_namespace *, struct inode *,struct dentry *,
umode_t, bool);
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
int (*symlink) (struct user_namespace *, struct inode *,struct dentry *,
const char *);
int (*mkdir) (struct user_namespace *, struct inode *,struct dentry *,
umode_t);
int (*rmdir) (struct inode *,struct dentry *);
int (*mknod) (struct user_namespace *, struct inode *,struct dentry *,
umode_t,dev_t);
int (*rename) (struct user_namespace *, struct inode *, struct dentry *,
struct inode *, struct dentry *, unsigned int);
int (*setattr) (struct user_namespace *, struct dentry *,
struct iattr *);
int (*getattr) (struct user_namespace *, const struct path *,
struct kstat *, u32, unsigned int);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
int (*update_time)(struct inode *, struct timespec64 *, int);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
umode_t create_mode);
int (*tmpfile) (struct user_namespace *, struct inode *,
struct dentry *, umode_t);
int (*set_acl)(struct user_namespace *, struct inode *,
struct posix_acl *, int);
int (*fileattr_set)(struct user_namespace *mnt_userns,
struct dentry *dentry, struct fileattr *fa);
int (*fileattr_get)(struct dentry *dentry, struct fileattr *fa);
} __attribute__((__aligned__((1 << 6))));

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ssize_t call_read_iter(struct file *file, struct kiocb *kio,
struct iov_iter *iter)
{
return file->f_op->read_iter(kio, iter);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ssize_t call_write_iter(struct file *file, struct kiocb *kio,
struct iov_iter *iter)
{
return file->f_op->write_iter(kio, iter);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int call_mmap(struct file *file, struct vm_area_struct *vma)
{
return file->f_op->mmap(file, vma);
}

extern ssize_t vfs_read(struct file *, char *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char *, size_t, loff_t *);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
size_t len, unsigned int flags);
extern int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t *count,
unsigned int remap_flags);
extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
extern int vfs_dedupe_file_range(struct file *file,
struct file_dedupe_range *same);
extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
struct file *dst_file, loff_t dst_pos,
loff_t len, unsigned int remap_flags);


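/* struct super_operations: superblock-wide callbacks: inode allocation
 * and writeback, sync, freeze/thaw, statfs and remount. */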
struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
void (*free_inode)(struct inode *);

void (*dirty_inode) (struct inode *, int flags);
int (*write_inode) (struct inode *, struct writeback_control *wbc);
int (*drop_inode) (struct inode *);
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
int (*freeze_super) (struct super_block *);
int (*freeze_fs) (struct super_block *);
int (*thaw_super) (struct super_block *);
int (*unfreeze_fs) (struct super_block *);
int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
void (*umount_begin) (struct super_block *);

int (*show_options)(struct seq_file *, struct dentry *);
int (*show_devname)(struct seq_file *, struct dentry *);
int (*show_path)(struct seq_file *, struct dentry *);
int (*show_stats)(struct seq_file *, struct dentry *);





long (*nr_cached_objects)(struct super_block *,
struct shrink_control *);
long (*free_cached_objects)(struct super_block *,
struct shrink_control *);
};
# 2157 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & 1; }
# 2187 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool HAS_UNMAPPED_ID(struct user_namespace *mnt_userns,
struct inode *inode)
{
return !uid_valid(i_uid_into_mnt(mnt_userns, inode)) ||
!gid_valid(i_gid_into_mnt(mnt_userns, inode));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int iocb_flags(struct file *file);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
*kiocb = (struct kiocb) {
.ki_filp = filp,
.ki_flags = iocb_flags(filp),
.ki_ioprio = get_current_ioprio(),
};
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
struct file *filp)
{
*kiocb = (struct kiocb) {
.ki_filp = filp,
.ki_flags = kiocb_src->ki_flags,
.ki_ioprio = kiocb_src->ki_ioprio,
.ki_pos = kiocb_src->ki_pos,
};
}
# 2321 "./include/linux/fs.h"
extern void __mark_inode_dirty(struct inode *, int);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mark_inode_dirty(struct inode *inode)
{
__mark_inode_dirty(inode, (((1 << 0) | (1 << 1)) | (1 << 2)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mark_inode_dirty_sync(struct inode *inode)
{
__mark_inode_dirty(inode, (1 << 0));
}
# 2341 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool inode_is_dirtytime_only(struct inode *inode)
{
return (inode->i_state & ((1 << 11) | (1 << 3) |
(1 << 5) | (1 << 4))) == (1 << 11);
}

extern void inc_nlink(struct inode *inode);
extern void drop_nlink(struct inode *inode);
extern void clear_nlink(struct inode *inode);
extern void set_nlink(struct inode *inode, unsigned int nlink);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inode_inc_link_count(struct inode *inode)
{
inc_nlink(inode);
mark_inode_dirty(inode);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inode_dec_link_count(struct inode *inode)
{
drop_nlink(inode);
mark_inode_dirty(inode);
}

enum file_time_flags {
S_ATIME = 1,
S_MTIME = 2,
S_CTIME = 4,
S_VERSION = 8,
};

extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
int inode_update_time(struct inode *inode, struct timespec64 *time, int flags);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void file_accessed(struct file *file)
{
if (!(file->f_flags & 01000000))
touch_atime(&file->f_path);
}

extern int file_modified(struct file *file);

int sync_inode_metadata(struct inode *inode, int wait);

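/* struct file_system_type: one per filesystem driver; hooked into the
 * global list by register_filesystem() (declared further down). */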
struct file_system_type {
const char *name;
int fs_flags;







int (*init_fs_context)(struct fs_context *);
const struct fs_parameter_spec *parameters;
struct dentry *(*mount) (struct file_system_type *, int,
const char *, void *);
void (*kill_sb) (struct super_block *);
struct module *owner;
struct file_system_type * next;
struct hlist_head fs_supers;

struct lock_class_key s_lock_key;
struct lock_class_key s_umount_key;
struct lock_class_key s_vfs_rename_key;
struct lock_class_key s_writers_key[(SB_FREEZE_COMPLETE - 1)];

struct lock_class_key i_lock_key;
struct lock_class_key i_mutex_key;
struct lock_class_key invalidate_lock_key;
struct lock_class_key i_mutex_dir_key;
};



extern struct dentry *mount_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_single(struct file_system_type *fs_type,
int flags, void *data,
int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_nodev(struct file_system_type *fs_type,
int flags, void *data,
int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
void generic_shutdown_super(struct super_block *sb);
void kill_block_super(struct super_block *sb);
void kill_anon_super(struct super_block *sb);
void kill_litter_super(struct super_block *sb);
void deactivate_super(struct super_block *sb);
void deactivate_locked_super(struct super_block *sb);
int set_anon_super(struct super_block *s, void *data);
int set_anon_super_fc(struct super_block *s, struct fs_context *fc);
int get_anon_bdev(dev_t *);
void free_anon_bdev(dev_t);
struct super_block *sget_fc(struct fs_context *fc,
int (*test)(struct super_block *, struct fs_context *),
int (*set)(struct super_block *, struct fs_context *));
struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
int flags, void *data);
# 2462 "./include/linux/fs.h"
extern int register_filesystem(struct file_system_type *);
extern int unregister_filesystem(struct file_system_type *);
extern struct vfsmount *kern_mount(struct file_system_type *);
extern void kern_unmount(struct vfsmount *mnt);
extern int may_umount_tree(struct vfsmount *);
extern int may_umount(struct vfsmount *);
extern long do_mount(const char *, const char *,
const char *, unsigned long, void *);
extern struct vfsmount *collect_mounts(const struct path *);
extern void drop_collected_mounts(struct vfsmount *);
extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
struct vfsmount *);
extern int vfs_statfs(const struct path *, struct kstatfs *);
extern int user_statfs(const char *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
extern bool our_mnt(struct vfsmount *mnt);
extern __attribute__((__format__(printf, 2, 3)))
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...);
extern int super_setup_bdi(struct super_block *sb);

extern int current_umask(void);

extern void ihold(struct inode * inode);
extern void iput(struct inode *);
extern int generic_update_time(struct inode *, struct timespec64 *, int);


extern struct kobject *fs_kobj;




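/* In break_lease()/break_deleg()/break_layout() below, the "fence rw,rw"
 * statement is the RISC-V expansion of smp_mb(), ordering the i_flctx
 * load against earlier lease updates. */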
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int break_lease(struct inode *inode, unsigned int mode)
{






do { do { } while (0); __asm__ __volatile__ ("fence " "rw" "," "rw" : : : "memory"); } while (0);
if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
return __break_lease(inode, mode, 32);
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int break_deleg(struct inode *inode, unsigned int mode)
{






do { do { } while (0); __asm__ __volatile__ ("fence " "rw" "," "rw" : : : "memory"); } while (0);
if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
return __break_lease(inode, mode, 4);
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
{
int ret;

ret = break_deleg(inode, 00000001|00004000);
if (ret == -11 && delegated_inode) {
*delegated_inode = inode;
ihold(inode);
}
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int break_deleg_wait(struct inode **delegated_inode)
{
int ret;

ret = break_deleg(*delegated_inode, 00000001);
iput(*delegated_inode);
*delegated_inode = ((void *)0);
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int break_layout(struct inode *inode, bool wait)
{
do { do { } while (0); __asm__ __volatile__ ("fence " "rw" "," "rw" : : : "memory"); } while (0);
if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
return __break_lease(inode,
wait ? 00000001 : 00000001 | 00004000,
2048);
return 0;
}
# 2586 "./include/linux/fs.h"
struct audit_names;
struct filename {
const char *name;
const char *uptr;
int refcnt;
struct audit_names *aname;
const char iname[];
};
_Static_assert(__builtin_offsetof(struct filename, iname) % sizeof(long) == 0, "offsetof(struct filename, iname) % sizeof(long) == 0");

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct user_namespace *file_mnt_user_ns(struct file *file)
{
return mnt_user_ns(file->f_path.mnt);
}
# 2610 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_idmapped_mnt(const struct vfsmount *mnt)
{
return mnt_user_ns(mnt) != mnt->mnt_sb->s_user_ns;
}

extern long vfs_truncate(const struct path *, loff_t);
int do_truncate(struct user_namespace *, struct dentry *, loff_t start,
unsigned int time_attrs, struct file *filp);
extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
extern long do_sys_open(int dfd, const char *filename, int flags,
umode_t mode);
extern struct file *file_open_name(struct filename *, int, umode_t);
extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(const struct path *,
const char *, int, umode_t);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct file *file_open_root_mnt(struct vfsmount *mnt,
const char *name, int flags, umode_t mode)
{
return file_open_root(&(struct path){.mnt = mnt, .dentry = mnt->mnt_root},
name, flags, mode);
}
extern struct file * dentry_open(const struct path *, int, const struct cred *);
extern struct file * open_with_fake_path(const struct path *, int,
struct inode*, const struct cred *);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct file *file_clone_open(struct file *file)
{
return dentry_open(&file->f_path, file->f_flags, file->f_cred);
}
extern int filp_close(struct file *, fl_owner_t id);

extern struct filename *getname_flags(const char *, int, int *);
extern struct filename *getname_uflags(const char *, int);
extern struct filename *getname(const char *);
extern struct filename *getname_kernel(const char *);
extern void putname(struct filename *name);

extern int finish_open(struct file *file, struct dentry *dentry,
int (*open)(struct inode *, struct file *));
extern int finish_no_open(struct file *file, struct dentry *dentry);


extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) vfs_caches_init_early(void);
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) vfs_caches_init(void);

extern struct kmem_cache *names_cachep;




extern struct super_block *blockdev_superblock;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sb_is_blkdev_sb(struct super_block *sb)
{
return 1 && sb == blockdev_superblock;
}

void emergency_thaw_all(void);
extern int sync_filesystem(struct super_block *);
extern const struct file_operations def_blk_fops;
extern const struct file_operations def_chr_fops;
# 2679 "./include/linux/fs.h"
extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
extern int register_chrdev_region(dev_t, unsigned, const char *);
extern int __register_chrdev(unsigned int major, unsigned int baseminor,
unsigned int count, const char *name,
const struct file_operations *fops);
extern void __unregister_chrdev(unsigned int major, unsigned int baseminor,
unsigned int count, const char *name);
extern void unregister_chrdev_region(dev_t, unsigned);
extern void chrdev_show(struct seq_file *,off_t);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int register_chrdev(unsigned int major, const char *name,
const struct file_operations *fops)
{
return __register_chrdev(major, 0, 256, name, fops);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void unregister_chrdev(unsigned int major, const char *name)
{
__unregister_chrdev(major, 0, 256, name);
}

extern void init_special_inode(struct inode *, umode_t, dev_t);


extern void make_bad_inode(struct inode *);
extern bool is_bad_inode(struct inode *);

extern int __attribute__((__warn_unused_result__)) file_fdatawait_range(struct file *file, loff_t lstart,
loff_t lend);
extern int __attribute__((__warn_unused_result__)) file_check_and_advance_wb_err(struct file *file);
extern int __attribute__((__warn_unused_result__)) file_write_and_wait_range(struct file *file,
loff_t start, loff_t end);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int file_write_and_wait(struct file *file)
{
return file_write_and_wait_range(file, 0, ((long long)(~0ULL >> 1)));
}

extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);

extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
unsigned int flags);






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
{
if (iocb->ki_flags & ( int) (( __kernel_rwf_t)0x00000002)) {
int ret = vfs_fsync_range(iocb->ki_filp,
iocb->ki_pos - count, iocb->ki_pos - 1,
(iocb->ki_flags & ( int) (( __kernel_rwf_t)0x00000004)) ? 0 : 1);
if (ret)
return ret;
}

return count;
}

extern void emergency_sync(void);
extern void emergency_remount(void);


extern int bmap(struct inode *inode, sector_t *block);







int notify_change(struct user_namespace *, struct dentry *,
struct iattr *, struct inode **);
int inode_permission(struct user_namespace *, struct inode *, int);
int generic_permission(struct user_namespace *, struct inode *, int);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int file_permission(struct file *file, int mask)
{
return inode_permission(file_mnt_user_ns(file),
file_inode(file), mask);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int path_permission(const struct path *path, int mask)
{
return inode_permission(mnt_user_ns(path->mnt),
d_inode(path->dentry), mask);
}
int __check_sticky(struct user_namespace *mnt_userns, struct inode *dir,
struct inode *inode);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool execute_ok(struct inode *inode)
{
return (inode->i_mode & (00100|00010|00001)) || (((inode->i_mode) & 00170000) == 0040000);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool inode_wrong_type(const struct inode *inode, umode_t mode)
{
return (inode->i_mode ^ mode) & 00170000;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void file_start_write(struct file *file)
{
if (!(((file_inode(file)->i_mode) & 00170000) == 0100000))
return;
sb_start_write(file_inode(file)->i_sb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool file_start_write_trylock(struct file *file)
{
if (!(((file_inode(file)->i_mode) & 00170000) == 0100000))
return true;
return sb_start_write_trylock(file_inode(file)->i_sb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void file_end_write(struct file *file)
{
if (!(((file_inode(file)->i_mode) & 00170000) == 0100000))
return;
__sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
}
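/*
 * Note: "(i_mode & 00170000) == 0100000" is the expansion of S_ISREG()
 * (S_IFMT == 00170000, S_IFREG == 0100000). The three helpers above
 * therefore take and release superblock freeze protection only for
 * regular files.
 */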
# 2823 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int get_write_access(struct inode *inode)
{
return atomic_inc_unless_negative(&inode->i_writecount) ? 0 : -26;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int deny_write_access(struct file *file)
{
struct inode *inode = file_inode(file);
return atomic_dec_unless_positive(&inode->i_writecount) ? 0 : -26;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_write_access(struct inode * inode)
{
atomic_dec(&inode->i_writecount);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void allow_write_access(struct file *file)
{
if (file)
atomic_inc(&file_inode(file)->i_writecount);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool inode_is_open_for_write(const struct inode *inode)
{
return atomic_read(&inode->i_writecount) > 0;
}
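/*
 * Note: -26 is -ETXTBSY. i_writecount uses a sign convention: positive
 * values count writers, negative values count "deny write" holders
 * (e.g. a file currently being executed), so the inc-unless-negative /
 * dec-unless-positive pairs above make the two uses mutually exclusive.
 */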


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void i_readcount_dec(struct inode *inode)
{
do { if (__builtin_expect(!!(!atomic_read(&inode->i_readcount)), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/fs.h"), "i" (2849), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
atomic_dec(&inode->i_readcount);
}
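/*
 * Note: the long one-line statement above is the RISC-V expansion of
 * BUG_ON(!atomic_read(&inode->i_readcount)): an "ebreak" trap plus a
 * __bug_table entry recording the file ("include/linux/fs.h"), line
 * (2849) and flags, followed by __builtin_unreachable(). The other
 * giant single-line asm statements later in this file are the same
 * BUG()/WARN_ON() machinery.
 */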
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void i_readcount_inc(struct inode *inode)
{
atomic_inc(&inode->i_readcount);
}
# 2866 "./include/linux/fs.h"
extern int do_pipe_flags(int *, int);

extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *);
ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos);
extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *);
extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *);
extern struct file * open_exec(const char *);


extern bool is_subdir(struct dentry *, struct dentry *);
extern bool path_is_under(const struct path *, const struct path *);

extern char *file_path(struct file *, char *, int);




extern loff_t default_llseek(struct file *file, loff_t offset, int whence);

extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence);

extern int inode_init_always(struct super_block *, struct inode *);
extern void inode_init_once(struct inode *);
extern void address_space_init_once(struct address_space *mapping);
extern struct inode * igrab(struct inode *);
extern ino_t iunique(struct super_block *, ino_t);
extern int inode_needs_sync(struct inode *inode);
extern int generic_delete_inode(struct inode *inode);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int generic_drop_inode(struct inode *inode)
{
return !inode->i_nlink || inode_unhashed(inode);
}
extern void d_mark_dontcache(struct inode *inode);

extern struct inode *ilookup5_nowait(struct super_block *sb,
unsigned long hashval, int (*test)(struct inode *, void *),
void *data);
extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data);
extern struct inode *ilookup(struct super_block *sb, unsigned long ino);

extern struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
int (*test)(struct inode *, void *),
int (*set)(struct inode *, void *),
void *data);
extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
extern struct inode * iget_locked(struct super_block *, unsigned long);
extern struct inode *find_inode_nowait(struct super_block *,
unsigned long,
int (*match)(struct inode *,
unsigned long, void *),
void *data);
extern struct inode *find_inode_rcu(struct super_block *, unsigned long,
int (*)(struct inode *, void *), void *);
extern struct inode *find_inode_by_ino_rcu(struct super_block *, unsigned long);
extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
extern int insert_inode_locked(struct inode *);

extern void lockdep_annotate_inode_mutex_key(struct inode *inode);



extern void unlock_new_inode(struct inode *);
extern void discard_new_inode(struct inode *);
extern unsigned int get_next_ino(void);
extern void evict_inodes(struct super_block *sb);
void dump_mapping(const struct address_space *);
# 2944 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_zero_ino(ino_t ino)
{
return (u32)ino == 0;
}

extern void __iget(struct inode * inode);
extern void iget_failed(struct inode *);
extern void clear_inode(struct inode *);
extern void __destroy_inode(struct inode *);
extern struct inode *new_inode_pseudo(struct super_block *sb);
extern struct inode *new_inode(struct super_block *sb);
extern void free_inode_nonrcu(struct inode *inode);
extern int should_remove_suid(struct dentry *);
extern int file_remove_privs(struct file *);





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *
alloc_inode_sb(struct super_block *sb, struct kmem_cache *cache, gfp_t gfp)
{
return kmem_cache_alloc_lru(cache, &sb->s_inode_lru, gfp);
}

extern void __insert_inode_hash(struct inode *, unsigned long hashval);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void insert_inode_hash(struct inode *inode)
{
__insert_inode_hash(inode, inode->i_ino);
}

extern void __remove_inode_hash(struct inode *);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void remove_inode_hash(struct inode *inode)
{
if (!inode_unhashed(inode) && !hlist_fake(&inode->i_hash))
__remove_inode_hash(inode);
}

extern void inode_sb_list_add(struct inode *inode);
extern void inode_add_lru(struct inode *inode);

extern int sb_set_blocksize(struct super_block *, int);
extern int sb_min_blocksize(struct super_block *, int);

extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
int generic_write_checks_count(struct kiocb *iocb, loff_t *count);
extern int generic_write_check_limits(struct file *file, loff_t pos,
loff_t *count);
extern int generic_file_rw_checks(struct file *file_in, struct file *file_out);
ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *to,
ssize_t already_read);
extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *);
ssize_t generic_perform_write(struct kiocb *, struct iov_iter *);

ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
rwf_t flags);
ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
rwf_t flags);
ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
struct iov_iter *iter);
ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
struct iov_iter *iter);


extern ssize_t generic_file_splice_read(struct file *, loff_t *,
struct pipe_inode_info *, size_t, unsigned int);
extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
struct file *, loff_t *, size_t, unsigned int);
extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
struct file *out, loff_t *, size_t len, unsigned int flags);
extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
loff_t *opos, size_t len, unsigned int flags);


extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern loff_t noop_llseek(struct file *file, loff_t offset, int whence);
extern loff_t no_llseek(struct file *file, loff_t offset, int whence);
extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize);
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence);
extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
int whence, loff_t maxsize, loff_t eof);
extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
int whence, loff_t size);
extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
extern loff_t no_seek_end_llseek(struct file *, loff_t, int);
int rw_verify_area(int, struct file *, const loff_t *, size_t);
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
extern int stream_open(struct inode * inode, struct file * filp);


typedef void (dio_submit_t)(struct bio *bio, struct inode *inode,
loff_t file_offset);

enum {

DIO_LOCKING = 0x01,


DIO_SKIP_HOLES = 0x02,
};

ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
get_block_t get_block,
dio_iodone_t end_io, dio_submit_t submit_io,
int flags);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ssize_t blockdev_direct_IO(struct kiocb *iocb,
struct inode *inode,
struct iov_iter *iter,
get_block_t get_block)
{
return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
get_block, ((void *)0), ((void *)0), DIO_LOCKING | DIO_SKIP_HOLES);
}
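/*
 * Note: the two "((void *)0)" arguments are NULL for the end_io and
 * submit_io callbacks; the wrapper simply forwards to
 * __blockdev_direct_IO() against the inode's backing block device with
 * DIO_LOCKING | DIO_SKIP_HOLES.
 */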


void inode_dio_wait(struct inode *inode);
# 3077 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inode_dio_begin(struct inode *inode)
{
atomic_inc(&inode->i_dio_count);
}
# 3089 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inode_dio_end(struct inode *inode)
{
if (atomic_dec_and_test(&inode->i_dio_count))
wake_up_bit(&inode->i_state, 9);
}




void dio_warn_stale_pagecache(struct file *filp);

extern void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask);

extern const struct file_operations generic_ro_fops;



extern int readlink_copy(char *, int, const char *);
extern int page_readlink(struct dentry *, char *, int);
extern const char *page_get_link(struct dentry *, struct inode *,
struct delayed_call *);
extern void page_put_link(void *);
extern int __page_symlink(struct inode *inode, const char *symname, int len,
int nofs);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
void generic_fillattr(struct user_namespace *, struct inode *, struct kstat *);
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
void inode_add_bytes(struct inode *inode, loff_t bytes);
void __inode_sub_bytes(struct inode *inode, loff_t bytes);
void inode_sub_bytes(struct inode *inode, loff_t bytes);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) loff_t __inode_get_bytes(struct inode *inode)
{
return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
}
loff_t inode_get_bytes(struct inode *inode);
void inode_set_bytes(struct inode *inode, loff_t bytes);
const char *simple_get_link(struct dentry *, struct inode *,
struct delayed_call *);
extern const struct inode_operations simple_symlink_inode_operations;

extern int iterate_dir(struct file *, struct dir_context *);

int vfs_fstatat(int dfd, const char *filename, struct kstat *stat,
int flags);
int vfs_fstat(int fd, struct kstat *stat);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int vfs_stat(const char *filename, struct kstat *stat)
{
return vfs_fstatat(-100, filename, stat, 0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int vfs_lstat(const char *name, struct kstat *stat)
{
return vfs_fstatat(-100, name, stat, 0x100);
}
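/*
 * Note: -100 is AT_FDCWD and 0x100 is AT_SYMLINK_NOFOLLOW, so
 * vfs_stat() follows symlinks relative to the CWD while vfs_lstat()
 * stats the link itself, mirroring the stat(2)/lstat(2) split.
 */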

extern const char *vfs_get_link(struct dentry *, struct delayed_call *);
extern int vfs_readlink(struct dentry *, char *, int);

extern struct file_system_type *get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
extern struct super_block *get_super(struct block_device *);
extern struct super_block *get_active_super(struct block_device *bdev);
extern void drop_super(struct super_block *sb);
extern void drop_super_exclusive(struct super_block *sb);
extern void iterate_supers(void (*)(struct super_block *, void *), void *);
extern void iterate_supers_type(struct file_system_type *,
void (*)(struct super_block *, void *), void *);

extern int dcache_dir_open(struct inode *, struct file *);
extern int dcache_dir_close(struct inode *, struct file *);
extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
extern int dcache_readdir(struct file *, struct dir_context *);
extern int simple_setattr(struct user_namespace *, struct dentry *,
struct iattr *);
extern int simple_getattr(struct user_namespace *, const struct path *,
struct kstat *, u32, unsigned int);
extern int simple_statfs(struct dentry *, struct kstatfs *);
extern int simple_open(struct inode *inode, struct file *file);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
extern int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry);
extern int simple_rename(struct user_namespace *, struct inode *,
struct dentry *, struct inode *, struct dentry *,
unsigned int);
extern void simple_recursive_removal(struct dentry *,
void (*callback)(struct dentry *));
extern int noop_fsync(struct file *, loff_t, loff_t, int);
extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
extern int simple_empty(struct dentry *);
extern int simple_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata);
extern const struct address_space_operations ram_aops;
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
extern const struct dentry_operations simple_dentry_operations;

extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
extern ssize_t generic_read_dir(struct file *, char *, size_t, loff_t *);
extern const struct file_operations simple_dir_operations;
extern const struct inode_operations simple_dir_inode_operations;
extern void make_empty_dir_inode(struct inode *inode);
extern bool is_empty_dir_inode(struct inode *inode);
struct tree_descr { const char *name; const struct file_operations *ops; int mode; };
struct dentry *d_alloc_name(struct dentry *, const char *);
extern int simple_fill_super(struct super_block *, unsigned long,
const struct tree_descr *);
extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
extern void simple_release_fs(struct vfsmount **mount, int *count);

extern ssize_t simple_read_from_buffer(void *to, size_t count,
loff_t *ppos, const void *from, size_t available);
extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
const void *from, size_t count);

extern int __generic_file_fsync(struct file *, loff_t, loff_t, int);
extern int generic_file_fsync(struct file *, loff_t, loff_t, int);

extern int generic_check_addressable(unsigned, u64);

extern void generic_set_encrypted_ci_d_ops(struct dentry *dentry);


extern int buffer_migrate_page(struct address_space *,
struct page *, struct page *,
enum migrate_mode);
extern int buffer_migrate_page_norefs(struct address_space *,
struct page *, struct page *,
enum migrate_mode);





int may_setattr(struct user_namespace *mnt_userns, struct inode *inode,
unsigned int ia_valid);
int setattr_prepare(struct user_namespace *, struct dentry *, struct iattr *);
extern int inode_newsize_ok(const struct inode *, loff_t offset);
void setattr_copy(struct user_namespace *, struct inode *inode,
const struct iattr *attr);

extern int file_update_time(struct file *file);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool vma_is_dax(const struct vm_area_struct *vma)
{
return vma->vm_file && ((vma->vm_file->f_mapping->host)->i_flags & 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool vma_is_fsdax(struct vm_area_struct *vma)
{
struct inode *inode;

if (!0 || !vma->vm_file)
return false;
if (!vma_is_dax(vma))
return false;
inode = file_inode(vma->vm_file);
if ((((inode->i_mode) & 00170000) == 0020000))
return false;
return true;
}
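/*
 * Note: the "& 0" in vma_is_dax() and the "!0" here show this
 * configuration was built without FS_DAX: IS_DAX() and
 * IS_ENABLED(CONFIG_FS_DAX) both fold to constant false, so both
 * helpers are compile-time dead. The mode test that survives,
 * "(i_mode & 00170000) == 0020000", is S_ISCHR().
 */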

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int iocb_flags(struct file *file)
{
int res = 0;
if (file->f_flags & 00002000)
res |= ( int) (( __kernel_rwf_t)0x00000010);
if (file->f_flags & 00040000)
res |= (1 << 17);
if ((file->f_flags & 00010000) || (((file->f_mapping->host)->i_sb->s_flags & (16)) || ((file->f_mapping->host)->i_flags & (1 << 0))))
res |= ( int) (( __kernel_rwf_t)0x00000002);
if (file->f_flags & 04000000)
res |= ( int) (( __kernel_rwf_t)0x00000004);
return res;
}
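/*
 * Note (hedged decode of the expanded constants, assuming the
 * generic/riscv fcntl ABI): 00002000 is O_APPEND -> IOCB_APPEND (0x10),
 * 00040000 is O_DIRECT -> IOCB_DIRECT (1 << 17), 00010000 is O_DSYNC
 * and "(s_flags & 16) || (i_flags & 1)" is SB_SYNCHRONOUS / S_SYNC ->
 * IOCB_DSYNC, and 04000000 is __O_SYNC -> additionally IOCB_SYNC.
 */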

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
{
int kiocb_flags = 0;


do { __attribute__((__noreturn__)) extern void __compiletime_assert_201(void) ; if (!(!(( int) ((( __kernel_rwf_t)0x00000001) | (( __kernel_rwf_t)0x00000002) | (( __kernel_rwf_t)0x00000004) | (( __kernel_rwf_t)0x00000008) | (( __kernel_rwf_t)0x00000010)) & (1 << 16)))) __compiletime_assert_201(); } while (0);

if (!flags)
return 0;
if (__builtin_expect(!!(flags & ~((( __kernel_rwf_t)0x00000001) | (( __kernel_rwf_t)0x00000002) | (( __kernel_rwf_t)0x00000004) | (( __kernel_rwf_t)0x00000008) | (( __kernel_rwf_t)0x00000010))), 0))
return -95;

if (flags & (( __kernel_rwf_t)0x00000008)) {
if (!(ki->ki_filp->f_mode & (( fmode_t)0x8000000)))
return -95;
kiocb_flags |= (1 << 20);
}
kiocb_flags |= ( int) (flags & ((( __kernel_rwf_t)0x00000001) | (( __kernel_rwf_t)0x00000002) | (( __kernel_rwf_t)0x00000004) | (( __kernel_rwf_t)0x00000008) | (( __kernel_rwf_t)0x00000010)));
if (flags & (( __kernel_rwf_t)0x00000004))
kiocb_flags |= ( int) (( __kernel_rwf_t)0x00000002);

ki->ki_flags |= kiocb_flags;
return 0;
}
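/*
 * Note: __compiletime_assert_201 is the expansion of a BUILD_BUG_ON
 * checking that the RWF_* mask (bits 0x1f) does not collide with the
 * kiocb-private flag (1 << 16), likely IOCB_EVENTFD. -95 is
 * -EOPNOTSUPP, 0x8000000 looks like FMODE_NOWAIT, and (1 << 20) is, in
 * all likelihood, IOCB_NOIO, set when the caller asked for RWF_NOWAIT
 * (0x00000008).
 */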

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ino_t parent_ino(struct dentry *dentry)
{
ino_t res;





spin_lock(&dentry->d_lockref.lock);
res = dentry->d_parent->d_inode->i_ino;
spin_unlock(&dentry->d_lockref.lock);
return res;
}
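/*
 * Note: dentry->d_lockref.lock is d_lock; taking it here keeps
 * d_parent stable while the parent's inode number is read, which is
 * why the helper cannot simply dereference d_parent locklessly.
 */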







struct simple_transaction_argresp {
ssize_t size;
char data[];
};



char *simple_transaction_get(struct file *file, const char *buf,
size_t size);
ssize_t simple_transaction_read(struct file *file, char *buf,
size_t size, loff_t *pos);
int simple_transaction_release(struct inode *inode, struct file *file);

void simple_transaction_set(struct file *file, size_t n);
# 3366 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__format__(printf, 1, 2)))
void __simple_attr_check_format(const char *fmt, ...)
{

}

int simple_attr_open(struct inode *inode, struct file *file,
int (*get)(void *, u64 *), int (*set)(void *, u64),
const char *fmt);
int simple_attr_release(struct inode *inode, struct file *file);
ssize_t simple_attr_read(struct file *file, char *buf,
size_t len, loff_t *ppos);
ssize_t simple_attr_write(struct file *file, const char *buf,
size_t len, loff_t *ppos);

struct ctl_table;
int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) list_bdev_fs_names(char *buf, size_t size);
# 3391 "./include/linux/fs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_sxid(umode_t mode)
{
return (mode & 0004000) || ((mode & 0002000) && (mode & 00010));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int check_sticky(struct user_namespace *mnt_userns,
struct inode *dir, struct inode *inode)
{
if (!(dir->i_mode & 0001000))
return 0;

return __check_sticky(mnt_userns, dir, inode);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inode_has_no_xattr(struct inode *inode)
{
if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & (1<<28)))
inode->i_flags |= (1 << 12);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_root_inode(struct inode *inode)
{
return inode == inode->i_sb->s_root->d_inode;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dir_emit(struct dir_context *ctx,
const char *name, int namelen,
u64 ino, unsigned type)
{
return ctx->actor(ctx, name, namelen, ctx->pos, ino, type) == 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dir_emit_dot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx, ".", 1, ctx->pos,
file->f_path.dentry->d_inode->i_ino, 4) == 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dir_emit_dotdot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx, "..", 2, ctx->pos,
parent_ino(file->f_path.dentry), 4) == 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dir_emit_dots(struct file *file, struct dir_context *ctx)
{
if (ctx->pos == 0) {
if (!dir_emit_dot(file, ctx))
return false;
ctx->pos = 1;
}
if (ctx->pos == 1) {
if (!dir_emit_dotdot(file, ctx))
return false;
ctx->pos = 2;
}
return true;
}
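/*
 * Note: the literal 4 passed for "." and ".." is DT_DIR, and the
 * "== 0" convention means a dir_context actor returning nonzero stops
 * iteration. dir_emit_dots() is the usual preamble of a readdir
 * implementation: emit the two dot entries once, advancing ctx->pos
 * past positions 0 and 1.
 */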
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dir_relax(struct inode *inode)
{
inode_unlock(inode);
inode_lock(inode);
return !((inode)->i_flags & (1 << 4));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dir_relax_shared(struct inode *inode)
{
inode_unlock_shared(inode);
inode_lock_shared(inode);
return !((inode)->i_flags & (1 << 4));
}
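/*
 * Note: (1 << 4) in i_flags is S_DEAD, i.e. the IS_DEADDIR() test.
 * Both relax helpers drop and immediately re-take the directory lock
 * (a preemption point for long readdirs) and report whether the
 * directory was removed while unlocked.
 */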

extern bool path_noexec(const struct path *path);
extern void inode_nohighmem(struct inode *inode);


extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
int advice);
extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
int advice);
# 24 "./include/linux/net.h" 2
# 1 "./include/linux/mm.h" 1
# 15 "./include/linux/mm.h"
# 1 "./include/linux/mmap_lock.h" 1







# 1 "./include/linux/tracepoint-defs.h" 1
# 12 "./include/linux/tracepoint-defs.h"
# 1 "./include/linux/static_key.h" 1
# 13 "./include/linux/tracepoint-defs.h" 2

struct static_call_key;

struct trace_print_flags {
unsigned long mask;
const char *name;
};

struct trace_print_flags_u64 {
unsigned long long mask;
const char *name;
};

struct tracepoint_func {
void *func;
void *data;
int prio;
};

struct tracepoint {
const char *name;
struct static_key key;
struct static_call_key *static_call_key;
void *static_call_tramp;
void *iterator;
int (*regfunc)(void);
void (*unregfunc)(void);
struct tracepoint_func *funcs;
};




typedef struct tracepoint * const tracepoint_ptr_t;


struct bpf_raw_event_map {
struct tracepoint *tp;
void *bpf_func;
u32 num_args;
u32 writable_size;
} __attribute__((__aligned__(32)));
# 9 "./include/linux/mmap_lock.h" 2





extern struct tracepoint __tracepoint_mmap_lock_start_locking;
extern struct tracepoint __tracepoint_mmap_lock_acquire_returned;
extern struct tracepoint __tracepoint_mmap_lock_released;



void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
bool success);
void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __mmap_lock_trace_start_locking(struct mm_struct *mm,
bool write)
{
if (static_key_false(&(__tracepoint_mmap_lock_start_locking).key))
__mmap_lock_do_trace_start_locking(mm, write);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
bool write, bool success)
{
if (static_key_false(&(__tracepoint_mmap_lock_acquire_returned).key))
__mmap_lock_do_trace_acquire_returned(mm, write, success);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
if (static_key_false(&(__tracepoint_mmap_lock_released).key))
__mmap_lock_do_trace_released(mm, write);
}
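/*
 * Note: static_key_false() is the jump-label fast path: the
 * out-of-line __mmap_lock_do_trace_*() calls are only reached when the
 * corresponding tracepoint has been enabled at runtime, so the mmap
 * lock wrappers below cost a single patched branch when tracing is
 * off.
 */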
# 63 "./include/linux/mmap_lock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mmap_init_lock(struct mm_struct *mm)
{
do { static struct lock_class_key __key; __init_rwsem((&mm->mmap_lock), "&mm->mmap_lock", &__key); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mmap_write_lock(struct mm_struct *mm)
{
__mmap_lock_trace_start_locking(mm, true);
down_write(&mm->mmap_lock);
__mmap_lock_trace_acquire_returned(mm, true, true);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
__mmap_lock_trace_start_locking(mm, true);
down_write_nested(&mm->mmap_lock, subclass);
__mmap_lock_trace_acquire_returned(mm, true, true);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mmap_write_lock_killable(struct mm_struct *mm)
{
int ret;

__mmap_lock_trace_start_locking(mm, true);
ret = down_write_killable(&mm->mmap_lock);
__mmap_lock_trace_acquire_returned(mm, true, ret == 0);
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mmap_write_trylock(struct mm_struct *mm)
{
bool ret;

__mmap_lock_trace_start_locking(mm, true);
ret = down_write_trylock(&mm->mmap_lock) != 0;
__mmap_lock_trace_acquire_returned(mm, true, ret);
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mmap_write_unlock(struct mm_struct *mm)
{
__mmap_lock_trace_released(mm, true);
up_write(&mm->mmap_lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mmap_write_downgrade(struct mm_struct *mm)
{
__mmap_lock_trace_acquire_returned(mm, false, true);
downgrade_write(&mm->mmap_lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mmap_read_lock(struct mm_struct *mm)
{
__mmap_lock_trace_start_locking(mm, false);
down_read(&mm->mmap_lock);
__mmap_lock_trace_acquire_returned(mm, false, true);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mmap_read_lock_killable(struct mm_struct *mm)
{
int ret;

__mmap_lock_trace_start_locking(mm, false);
ret = down_read_killable(&mm->mmap_lock);
__mmap_lock_trace_acquire_returned(mm, false, ret == 0);
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mmap_read_trylock(struct mm_struct *mm)
{
bool ret;

__mmap_lock_trace_start_locking(mm, false);
ret = down_read_trylock(&mm->mmap_lock) != 0;
__mmap_lock_trace_acquire_returned(mm, false, ret);
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mmap_read_unlock(struct mm_struct *mm)
{
__mmap_lock_trace_released(mm, false);
up_read(&mm->mmap_lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
__mmap_lock_trace_released(mm, false);
up_read_non_owner(&mm->mmap_lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mmap_assert_locked(struct mm_struct *mm)
{
do { ({ int __ret_warn_on = !!(debug_locks && !(lock_is_held(&(&mm->mmap_lock)->dep_map) != 0)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/mmap_lock.h"), "i" (155), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } while (0);
do { if (__builtin_expect(!!(!rwsem_is_locked(&mm->mmap_lock)), 0)) { dump_mm(mm); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/mmap_lock.h"), "i" (156), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mmap_assert_write_locked(struct mm_struct *mm)
{
do { ({ int __ret_warn_on = !!(debug_locks && !(lock_is_held_type(&(&mm->mmap_lock)->dep_map, (0)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/mmap_lock.h"), "i" (161), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } while (0);
do { if (__builtin_expect(!!(!rwsem_is_locked(&mm->mmap_lock)), 0)) { dump_mm(mm); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/mmap_lock.h"), "i" (162), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mmap_lock_is_contended(struct mm_struct *mm)
{
return rwsem_is_contended(&mm->mmap_lock);
}
# 16 "./include/linux/mm.h" 2
# 1 "./include/linux/range.h" 1





struct range {
u64 start;
u64 end;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 range_len(const struct range *range)
{
return range->end - range->start + 1;
}
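/*
 * Note: struct range is inclusive on both ends, hence the "+ 1".
 * A hypothetical example (illustrative values only):
 *
 *	struct range r = { .start = 0x1000, .end = 0x1fff };
 *	// range_len(&r) == 0x1000
 */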

int add_range(struct range *range, int az, int nr_range,
u64 start, u64 end);


int add_range_with_merge(struct range *range, int az, int nr_range,
u64 start, u64 end);

void subtract_range(struct range *range, int az, u64 start, u64 end);

int clean_sort_range(struct range *range, int az);

void sort_range(struct range *range, int nr_range);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) resource_size_t cap_resource(u64 val)
{
if (val > ((resource_size_t)~0))
return ((resource_size_t)~0);

return val;
}
# 17 "./include/linux/mm.h" 2





# 1 "./include/linux/page_ext.h" 1






# 1 "./include/linux/stackdepot.h" 1
# 16 "./include/linux/stackdepot.h"
typedef u32 depot_stack_handle_t;

depot_stack_handle_t __stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
gfp_t gfp_flags, bool can_alloc);
# 30 "./include/linux/stackdepot.h"
int stack_depot_init(void);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int stack_depot_early_init(void) { return 0; }


depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries, gfp_t gfp_flags);

unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries);

int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
int spaces);

void stack_depot_print(depot_stack_handle_t stack);
# 8 "./include/linux/page_ext.h" 2

struct pglist_data;
struct page_ext_operations {
size_t offset;
size_t size;
bool (*need)(void);
void (*init)(void);
};



enum page_ext_flags {
PAGE_EXT_OWNER,
PAGE_EXT_OWNER_ALLOCATED,




};
# 35 "./include/linux/page_ext.h"
struct page_ext {
unsigned long flags;
};

extern unsigned long page_ext_size;
extern void pgdat_page_ext_init(struct pglist_data *pgdat);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_ext_init_flatmem(void)
{
}
extern void page_ext_init(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_ext_init_flatmem_late(void)
{
}
# 58 "./include/linux/page_ext.h"
struct page_ext *lookup_page_ext(const struct page *page);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page_ext *page_ext_next(struct page_ext *curr)
{
void *next = curr;
next += page_ext_size;
return next;
}
# 23 "./include/linux/mm.h" 2


# 1 "./include/linux/page_ref.h" 1
# 10 "./include/linux/page_ref.h"
extern struct tracepoint __tracepoint_page_ref_set;
extern struct tracepoint __tracepoint_page_ref_mod;
extern struct tracepoint __tracepoint_page_ref_mod_and_test;
extern struct tracepoint __tracepoint_page_ref_mod_and_return;
extern struct tracepoint __tracepoint_page_ref_mod_unless;
extern struct tracepoint __tracepoint_page_ref_freeze;
extern struct tracepoint __tracepoint_page_ref_unfreeze;
# 41 "./include/linux/page_ref.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __page_ref_set(struct page *page, int v)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __page_ref_mod(struct page *page, int v)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __page_ref_unfreeze(struct page *page, int v)
{
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int page_ref_count(const struct page *page)
{
return atomic_read(&page->_refcount);
}
# 87 "./include/linux/page_ref.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int folio_ref_count(const struct folio *folio)
{
return page_ref_count(&folio->page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int page_count(const struct page *page)
{
return folio_ref_count((_Generic((page), const struct page *: (const struct folio *)_compound_head(page), struct page *: (struct folio *)_compound_head(page))));
}
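/*
 * Note: the _Generic() expression above is the expansion of
 * page_folio(): _compound_head() resolves a tail page to its head, and
 * the const/non-const branches exist only to preserve the pointer's
 * constness, so page_count() is folio_ref_count() of the page's
 * containing folio. The "if (false)" bodies in the surrounding helpers
 * appear to be the compiled-out page_ref tracepoint hooks for this
 * configuration.
 */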

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_page_count(struct page *page, int v)
{
atomic_set(&page->_refcount, v);
if (false)
__page_ref_set(page, v);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_set_count(struct folio *folio, int v)
{
set_page_count(&folio->page, v);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void init_page_count(struct page *page)
{
set_page_count(page, 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_ref_add(struct page *page, int nr)
{
atomic_add(nr, &page->_refcount);
if (false)
__page_ref_mod(page, nr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_ref_add(struct folio *folio, int nr)
{
page_ref_add(&folio->page, nr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_ref_sub(struct page *page, int nr)
{
atomic_sub(nr, &page->_refcount);
if (false)
__page_ref_mod(page, -nr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_ref_sub(struct folio *folio, int nr)
{
page_ref_sub(&folio->page, nr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int page_ref_sub_return(struct page *page, int nr)
{
int ret = atomic_sub_return(nr, &page->_refcount);

if (false)
__page_ref_mod_and_return(page, -nr, ret);
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int folio_ref_sub_return(struct folio *folio, int nr)
{
return page_ref_sub_return(&folio->page, nr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_ref_inc(struct page *page)
{
atomic_inc(&page->_refcount);
if (false)
__page_ref_mod(page, 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_ref_inc(struct folio *folio)
{
page_ref_inc(&folio->page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_ref_dec(struct page *page)
{
atomic_dec(&page->_refcount);
if (false)
__page_ref_mod(page, -1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_ref_dec(struct folio *folio)
{
page_ref_dec(&folio->page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int page_ref_sub_and_test(struct page *page, int nr)
{
int ret = atomic_sub_and_test(nr, &page->_refcount);

if (false)
__page_ref_mod_and_test(page, -nr, ret);
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int folio_ref_sub_and_test(struct folio *folio, int nr)
{
return page_ref_sub_and_test(&folio->page, nr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int page_ref_inc_return(struct page *page)
{
int ret = atomic_inc_return(&page->_refcount);

if (false)
__page_ref_mod_and_return(page, 1, ret);
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int folio_ref_inc_return(struct folio *folio)
{
return page_ref_inc_return(&folio->page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int page_ref_dec_and_test(struct page *page)
{
int ret = atomic_dec_and_test(&page->_refcount);

if (false)
__page_ref_mod_and_test(page, -1, ret);
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int folio_ref_dec_and_test(struct folio *folio)
{
return page_ref_dec_and_test(&folio->page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int page_ref_dec_return(struct page *page)
{
int ret = atomic_dec_return(&page->_refcount);

if (false)
__page_ref_mod_and_return(page, -1, ret);
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int folio_ref_dec_return(struct folio *folio)
{
return page_ref_dec_return(&folio->page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool page_ref_add_unless(struct page *page, int nr, int u)
{
bool ret = atomic_add_unless(&page->_refcount, nr, u);

if (false)
__page_ref_mod_unless(page, nr, ret);
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_ref_add_unless(struct folio *folio, int nr, int u)
{
return page_ref_add_unless(&folio->page, nr, u);
}
# 261 "./include/linux/page_ref.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_try_get(struct folio *folio)
{
return folio_ref_add_unless(folio, 1, 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_ref_try_add_rcu(struct folio *folio, int count)
{
# 280 "./include/linux/page_ref.h"
if (__builtin_expect(!!(!folio_ref_add_unless(folio, count, 0)), 0)) {

return false;
}

return true;
}
# 311 "./include/linux/page_ref.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_try_get_rcu(struct folio *folio)
{
return folio_ref_try_add_rcu(folio, 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int page_ref_freeze(struct page *page, int count)
{
int ret = __builtin_expect(!!(atomic_cmpxchg(&page->_refcount, count, 0) == count), 1);

if (false)
__page_ref_freeze(page, count, ret);
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int folio_ref_freeze(struct folio *folio, int count)
{
return page_ref_freeze(&folio->page, count);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_ref_unfreeze(struct page *page, int count)
{
do { if (__builtin_expect(!!(page_count(page) != 0), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "page_count(page) != 0"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page_ref.h"), "i" (332), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0);
do { if (__builtin_expect(!!(count == 0), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/page_ref.h"), "i" (333), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);

atomic_set_release(&page->_refcount, count);
if (false)
__page_ref_unfreeze(page, count);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_ref_unfreeze(struct folio *folio, int count)
{
page_ref_unfreeze(&folio->page, count);
}
# 26 "./include/linux/mm.h" 2



# 1 "./include/linux/pgtable.h" 1
# 14 "./include/linux/pgtable.h"
# 1 "./include/asm-generic/pgtable_uffd.h" 1




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int pte_uffd_wp(pte_t pte)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int pmd_uffd_wp(pmd_t pmd)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) pte_t pte_mkuffd_wp(pte_t pte)
{
return pte;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) pmd_t pmd_mkuffd_wp(pmd_t pmd)
{
return pmd;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) pte_t pte_clear_uffd_wp(pte_t pte)
{
return pte;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) pmd_t pmd_clear_uffd_wp(pmd_t pmd)
{
return pmd;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) pte_t pte_swp_mkuffd_wp(pte_t pte)
{
return pte;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int pte_swp_uffd_wp(pte_t pte)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
return pte;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
{
return pmd;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_swp_uffd_wp(pmd_t pmd)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
{
return pmd;
}
# 15 "./include/linux/pgtable.h" 2
# 61 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long pte_index(unsigned long address)
{
return (address >> (12)) & ((((1UL) << (12)) / sizeof(pte_t)) - 1);
}
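/*
 * Note: 12 here is PAGE_SHIFT and ((1UL << 12) / sizeof(pte_t)) is
 * PTRS_PER_PTE, so pte_index() selects the PTE slot for an address
 * within one 4 KiB page table. The pmd/pgd helpers below follow the
 * same pattern at their respective shift levels (21 for the PMD; the
 * PGD shift is chosen at runtime on riscv via pgtable_l5_enabled /
 * pgtable_l4_enabled).
 */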
# 89 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
# 108 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
return pud_pgtable(*pud) + (((address) >> 21) & ((((1UL) << (12)) / sizeof(pmd_t)) - 1));
}
# 123 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
{
return (pgd + (((address) >> (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30))) & ((((1UL) << (12)) / sizeof(pgd_t)) - 1)));
};
# 150 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
{
return pmd_offset(pud_offset(p4d_offset(pgd_offset_pgd((mm)->pgd, (va)), va), va), va);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t *pmd_off_k(unsigned long va)
{
return pmd_offset(pud_offset(p4d_offset(pgd_offset_pgd((&init_mm)->pgd, ((va))), va), va), va);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t *virt_to_kpte(unsigned long vaddr)
{
pmd_t *pmd = pmd_off_k(vaddr);

return pmd_none(*pmd) ? ((void *)0) : pte_offset_kernel(pmd, vaddr);
}
# 246 "./include/linux/pgtable.h"
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
# 263 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ptep_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pte_clear(mm, addr, ptep);
}
# 282 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t ptep_get(pte_t *ptep)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_202(void) ; if (!((sizeof(*ptep) == sizeof(char) || sizeof(*ptep) == sizeof(short) || sizeof(*ptep) == sizeof(int) || sizeof(*ptep) == sizeof(long)) || sizeof(*ptep) == sizeof(long long))) __compiletime_assert_202(); } while (0); (*(const volatile typeof( _Generic((*ptep), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*ptep))) *)&(*ptep)); });
}
# 337 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t ptep_get_lockless(pte_t *ptep)
{
return ptep_get(ptep);
}
# 355 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
unsigned long address,
pud_t *pudp)
{
pud_t pud = *pudp;

pud_clear(pudp);
return pud;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
int full)
{
return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
unsigned long address, pud_t *pudp,
int full)
{
return pudp_huge_get_and_clear(mm, address, pudp);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t ptep_get_and_clear_full(struct mm_struct *mm,
unsigned long address, pte_t *ptep,
int full)
{
pte_t pte;
pte = ptep_get_and_clear(mm, address, ptep);
return pte;
}
# 408 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void update_mmu_tlb(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
}
# 421 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pte_clear_not_present_full(struct mm_struct *mm,
unsigned long address,
pte_t *ptep,
int full)
{
pte_clear(mm, address, ptep);
}



extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
unsigned long address,
pte_t *ptep);



extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmdp);
extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
unsigned long address,
pud_t *pudp);
# 463 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t pte_sw_mkyoung(pte_t pte)
{
return pte;
}
# 520 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pudp_set_wrprotect(struct mm_struct *mm,
unsigned long address, pud_t *pudp)
{
do { __attribute__((__noreturn__)) extern void __compiletime_assert_203(void) ; if (!(!(1))) __compiletime_assert_203(); } while (0);
}
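/*
 * The "if (!(!(1))) __compiletime_assert_203()" construct above matches the
 * expansion of BUILD_BUG(): this stub of pudp_set_wrprotect() exists for a
 * configuration in which it must never be called, so any emitted call to it
 * fails the build.
 */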





extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
# 545 "./include/linux/pgtable.h"
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable);



extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
# 559 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
pmd_t old_pmd = *pmdp;
set_pmd_at(vma->vm_mm, address, pmdp, pmd);
return old_pmd;
}



extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp);
# 587 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pte_unused(pte_t pte)
{
return 0;
}
# 619 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
return ((pmd_a).pmd) == ((pmd_b).pmd);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pud_same(pud_t pud_a, pud_t pud_b)
{
return ((pud_a).pud) == ((pud_b).pud);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
{
return ((p4d_a).p4d) == ((p4d_b).p4d);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
{
return ((pgd_a).pgd) == ((pgd_b).pgd);
}
# 691 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_do_swap_page(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long addr,
pte_t pte, pte_t oldpte)
{

}
# 709 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int arch_unmap_one(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long addr,
pte_t orig_pte)
{
return 0;
}
# 724 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int arch_prepare_to_swap(struct page *page)
{
return 0;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_swap_invalidate_page(int type, unsigned long offset)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_swap_invalidate_area(int type)
{
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_swap_restore(swp_entry_t entry, struct page *page)
{
}
# 799 "./include/linux/pgtable.h"
void pgd_clear_bad(pgd_t *);


void p4d_clear_bad(p4d_t *);





void pud_clear_bad(pud_t *);




void pmd_clear_bad(pmd_t *);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pgd_none_or_clear_bad(pgd_t *pgd)
{
if (pgd_none(*pgd))
return 1;
if (__builtin_expect(!!(pgd_bad(*pgd)), 0)) {
pgd_clear_bad(pgd);
return 1;
}
return 0;
}
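/*
 * "__builtin_expect(!!(x), 0)" here and throughout is the expansion of
 * unlikely(x) (likely(x) expands the same way with 1). The
 * pgd/p4d/pud/pmd_none_or_clear_bad() helpers that follow all share this
 * shape: report empty entries, clear and report corrupt ones.
 */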

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int p4d_none_or_clear_bad(p4d_t *p4d)
{
if (p4d_none(*p4d))
return 1;
if (__builtin_expect(!!(p4d_bad(*p4d)), 0)) {
p4d_clear_bad(p4d);
return 1;
}
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pud_none_or_clear_bad(pud_t *pud)
{
if (pud_none(*pud))
return 1;
if (__builtin_expect(!!(pud_bad(*pud)), 0)) {
pud_clear_bad(pud);
return 1;
}
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_none_or_clear_bad(pmd_t *pmd)
{
if (pmd_none(*pmd))
return 1;
if (__builtin_expect(!!(pmd_bad(*pmd)), 0)) {
pmd_clear_bad(pmd);
return 1;
}
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr,
pte_t *ptep)
{





return ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr,
pte_t *ptep, pte_t pte)
{




set_pte_at(vma->vm_mm, addr, ptep, pte);
}
# 897 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr,
pte_t *ptep)
{
return __ptep_modify_prot_start(vma, addr, ptep);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr,
pte_t *ptep, pte_t old_pte, pte_t pte)
{
__ptep_modify_prot_commit(vma, addr, ptep, pte);
}
# 949 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
if (((oldprot).pgprot) == (((oldprot)).pgprot))
newprot = (newprot);
if (((oldprot).pgprot) == (((oldprot)).pgprot))
newprot = (newprot);
if (((oldprot).pgprot) == (((oldprot)).pgprot))
newprot = (newprot);
return newprot;
}
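/*
 * pgprot_modify() looks degenerate above -- each condition compares oldprot
 * with itself and each assignment is a no-op -- apparently because
 * pgprot_noncached(), pgprot_writecombine() and pgprot_device() are identity
 * macros on this configuration; the compiler can fold the whole body down to
 * "return newprot;".
 */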
# 1024 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pte_soft_dirty(pte_t pte)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_soft_dirty(pmd_t pmd)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t pte_mksoft_dirty(pte_t pte)
{
return pte;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
return pmd;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t pte_clear_soft_dirty(pte_t pte)
{
return pte;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
return pmd;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t pte_swp_mksoft_dirty(pte_t pte)
{
return pte;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pte_swp_soft_dirty(pte_t pte)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
return pte;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
return pmd;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_swp_soft_dirty(pmd_t pmd)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
return pmd;
}
# 1096 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
unsigned long pfn, unsigned long addr,
unsigned long size)
{
return 0;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
pfn_t pfn)
{
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int track_pfn_copy(struct vm_area_struct *vma)
{
return 0;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void untrack_pfn(struct vm_area_struct *vma,
unsigned long pfn, unsigned long size)
{
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void untrack_pfn_moved(struct vm_area_struct *vma)
{
}
# 1161 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int is_zero_pfn(unsigned long pfn)
{
extern unsigned long zero_pfn;
return pfn == zero_pfn;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long my_zero_pfn(unsigned long addr)
{
extern unsigned long zero_pfn;
return zero_pfn;
}
# 1202 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pud_write(pud_t pud)
{
do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/pgtable.h"), "i" (1204), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0);
return 0;
}
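/*
 * The inline asm above is the riscv expansion of BUG(): an ebreak trap plus
 * a struct bug_entry record emitted into the __bug_table section (file and
 * line encoded as section-relative offsets), followed by
 * __builtin_unreachable().
 */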



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_devmap(pmd_t pmd)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pud_devmap(pud_t pud)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pgd_devmap(pgd_t pgd)
{
return 0;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pud_trans_huge(pud_t pud)
{
return 0;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
{
pud_t pudval = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_204(void) ; if (!((sizeof(*pud) == sizeof(char) || sizeof(*pud) == sizeof(short) || sizeof(*pud) == sizeof(int) || sizeof(*pud) == sizeof(long)) || sizeof(*pud) == sizeof(long long))) __compiletime_assert_204(); } while (0); (*(const volatile typeof( _Generic((*pud), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*pud))) *)&(*pud)); });

if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
return 1;
if (__builtin_expect(!!(pud_bad(pudval)), 0)) {
pud_clear_bad(pud);
return 1;
}
return 0;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pud_trans_unstable(pud_t *pud)
{




return 0;

}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t pmd_read_atomic(pmd_t *pmdp)
{





return *pmdp;
}
# 1294 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
pmd_t pmdval = pmd_read_atomic(pmd);
# 1312 "./include/linux/pgtable.h"
__asm__ __volatile__("": : :"memory");
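/* The empty asm with a "memory" clobber above is the expansion of barrier(). */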
# 1329 "./include/linux/pgtable.h"
if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
(1 && !pmd_present(pmdval)))
return 1;
if (__builtin_expect(!!(pmd_bad(pmdval)), 0)) {
pmd_clear_bad(pmd);
return 1;
}
return 0;
}
# 1351 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_trans_unstable(pmd_t *pmd)
{

return pmd_none_or_trans_huge_or_clear_bad(pmd);



}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_devmap_trans_unstable(pmd_t *pmd)
{
return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
}
# 1380 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pte_protnone(pte_t pte)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_protnone(pmd_t pmd)
{
return 0;
}
# 1417 "./include/linux/pgtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int p4d_clear_huge(p4d_t *p4d)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pud_clear_huge(pud_t *pud)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_clear_huge(pmd_t *pmd)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
return 0;
}
# 1474 "./include/linux/pgtable.h"
struct file;
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t *vma_prot);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void init_espfix_bsp(void) { }


extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) pgtable_cache_init(void);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool arch_has_pfn_modify_check(void)
{
return false;
}
# 1535 "./include/linux/pgtable.h"
typedef unsigned int pgtbl_mod_mask;
# 30 "./include/linux/mm.h" 2


struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct user_struct;
struct pt_regs;

extern int sysctl_page_lock_unfairness;

void init_mm_internals(void);


extern unsigned long max_mapnr;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_max_mapnr(unsigned long limit)
{
max_mapnr = limit;
}




extern atomic_long_t _totalram_pages;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long totalram_pages(void)
{
return (unsigned long)atomic_long_read(&_totalram_pages);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void totalram_pages_inc(void)
{
atomic_long_inc(&_totalram_pages);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void totalram_pages_dec(void)
{
atomic_long_dec(&_totalram_pages);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void totalram_pages_add(long count)
{
atomic_long_add(count, &_totalram_pages);
}

extern void * high_memory;
extern int page_cluster;


extern int sysctl_legacy_va_layout;





extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits;
# 145 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __mm_zero_struct_page(struct page *page)
{
unsigned long *_pp = (void *)page;


do { __attribute__((__noreturn__)) extern void __compiletime_assert_205(void) ; if (!(!(sizeof(struct page) & 7))) __compiletime_assert_205(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_206(void) ; if (!(!(sizeof(struct page) < 56))) __compiletime_assert_206(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_207(void) ; if (!(!(sizeof(struct page) > 80))) __compiletime_assert_207(); } while (0);
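/*
 * The three do/while blocks above are BUILD_BUG_ON() expansions guarding the
 * switch below: sizeof(struct page) must be a multiple of 8 and lie in
 * [56, 80] bytes so that the unsigned-long stores cover the structure
 * exactly.
 */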

switch (sizeof(struct page)) {
case 80:
_pp[9] = 0;
__attribute__((__fallthrough__));
case 72:
_pp[8] = 0;
__attribute__((__fallthrough__));
case 64:
_pp[7] = 0;
__attribute__((__fallthrough__));
case 56:
_pp[6] = 0;
_pp[5] = 0;
_pp[4] = 0;
_pp[3] = 0;
_pp[2] = 0;
_pp[1] = 0;
_pp[0] = 0;
}
}
# 197 "./include/linux/mm.h"
extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *,
loff_t *);
int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
loff_t *);
int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
loff_t *);
# 228 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct folio *lru_to_folio(struct list_head *head)
{
return ({ void *__mptr = (void *)((head)->prev); _Static_assert(__builtin_types_compatible_p(typeof(*((head)->prev)), typeof(((struct folio *)0)->lru)) || __builtin_types_compatible_p(typeof(*((head)->prev)), typeof(void)), "pointer type mismatch in container_of()"); ((struct folio *)(__mptr - __builtin_offsetof(struct folio, lru))); });
}
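/*
 * The statement expression above appears to be
 * list_entry((head)->prev, struct folio, lru), i.e. a container_of()
 * expansion: the _Static_assert checks pointer-type compatibility and the
 * offsetof arithmetic recovers the enclosing struct folio.
 */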

void setup_initial_init_mm(void *start_code, void *end_code,
void *end_data, void *brk);
# 245 "./include/linux/mm.h"
struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);
# 427 "./include/linux/mm.h"
extern pgprot_t protection_map[16];
# 450 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fault_flag_allow_retry_first(enum fault_flag flags)
{
return (flags & FAULT_FLAG_ALLOW_RETRY) &&
(!(flags & FAULT_FLAG_TRIED));
}
# 478 "./include/linux/mm.h"
struct vm_fault {
const struct {
struct vm_area_struct *vma;
gfp_t gfp_mask;
unsigned long pgoff;
unsigned long address;
unsigned long real_address;
};
enum fault_flag flags;

pmd_t *pmd;

pud_t *pud;


union {
pte_t orig_pte;
pmd_t orig_pmd;


};

struct page *cow_page;
struct page *page;





pte_t *pte;



spinlock_t *ptl;



pgtable_t prealloc_pte;






};


enum page_entry_size {
PE_SIZE_PTE = 0,
PE_SIZE_PMD,
PE_SIZE_PUD,
};






struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);




void (*close)(struct vm_area_struct * area);

int (*may_split)(struct vm_area_struct *area, unsigned long addr);
int (*mremap)(struct vm_area_struct *area);





int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
unsigned long end, unsigned long newflags);
vm_fault_t (*fault)(struct vm_fault *vmf);
vm_fault_t (*huge_fault)(struct vm_fault *vmf,
enum page_entry_size pe_size);
vm_fault_t (*map_pages)(struct vm_fault *vmf,
unsigned long start_pgoff, unsigned long end_pgoff);
unsigned long (*pagesize)(struct vm_area_struct * area);



vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);


vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);





int (*access)(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);




const char *(*name)(struct vm_area_struct *vma);
# 607 "./include/linux/mm.h"
struct page *(*find_special_page)(struct vm_area_struct *vma,
unsigned long addr);
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
static const struct vm_operations_struct dummy_vm_ops = {};

memset(vma, 0, sizeof(*vma));
vma->vm_mm = mm;
vma->vm_ops = &dummy_vm_ops;
INIT_LIST_HEAD(&vma->anon_vma_chain);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vma_set_anonymous(struct vm_area_struct *vma)
{
vma->vm_ops = ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool vma_is_anonymous(struct vm_area_struct *vma)
{
return !vma->vm_ops;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool vma_is_temporary_stack(struct vm_area_struct *vma)
{
int maybe_stack = vma->vm_flags & (0x00000100 | 0x00000000);

if (!maybe_stack)
return false;

if ((vma->vm_flags & (0x00010000 | 0x00008000)) ==
(0x00010000 | 0x00008000))
return true;

return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool vma_is_foreign(struct vm_area_struct *vma)
{
if (!get_current()->mm)
return true;

if (get_current()->mm != vma->vm_mm)
return true;

return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool vma_is_accessible(struct vm_area_struct *vma)
{
return vma->vm_flags & (0x00000001 | 0x00000002 | 0x00000004);
}






bool vma_is_shmem(struct vm_area_struct *vma);




int vma_is_stack_for_current(struct vm_area_struct *vma);




struct mmu_gather;
struct inode;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int compound_order(struct page *page)
{
if (!PageHead(page))
return 0;
return page[1].compound_order;
}
# 695 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int folio_order(struct folio *folio)
{
return compound_order(&folio->page);
}


# 1 "./include/linux/huge_mm.h" 1




# 1 "./include/linux/sched/coredump.h" 1
# 17 "./include/linux/sched/coredump.h"
extern void set_dumpable(struct mm_struct *mm, int value);






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __get_dumpable(unsigned long mm_flags)
{
return mm_flags & ((1 << 2) - 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int get_dumpable(struct mm_struct *mm)
{
return __get_dumpable(mm->flags);
}
# 6 "./include/linux/huge_mm.h" 2




vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
struct vm_area_struct *vma);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}


vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmd,
unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
pgprot_t newprot, unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
pgprot_t pgprot, bool write);
# 55 "./include/linux/huge_mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
bool write)
{
return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
pgprot_t pgprot, bool write);
# 74 "./include/linux/huge_mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
bool write)
{
return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}

enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_NEVER_DAX,
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count,
enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf,
enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
# 116 "./include/linux/huge_mm.h"
extern unsigned long transparent_hugepage_flags;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool transhuge_vma_suitable(struct vm_area_struct *vma,
unsigned long haddr)
{

if (!vma_is_anonymous(vma)) {
if (!((((vma->vm_start >> (12)) - vma->vm_pgoff) & ((typeof((vma->vm_start >> (12)) - vma->vm_pgoff))((1<<(21 -(12)))) - 1)) == 0))

return false;
}

if (haddr < vma->vm_start || haddr + ((1UL) << 21) > vma->vm_end)
return false;
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool transhuge_vma_enabled(struct vm_area_struct *vma,
unsigned long vm_flags)
{

if ((vm_flags & 0x40000000) ||
arch_test_bit(24, &vma->vm_mm->flags))
return false;
return true;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{




if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
return false;

if (!transhuge_vma_enabled(vma, vma->vm_flags))
return false;

if (vma_is_temporary_stack(vma))
return false;

if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
return true;

if (vma_is_dax(vma))
return true;

if (transparent_hugepage_flags &
(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
return !!(vma->vm_flags & 0x20000000);

return false;
}

bool transparent_hugepage_active(struct vm_area_struct *vma);





unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);

void prep_transhuge_page(struct page *page);
void free_transhuge_page(struct page *page);

bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int split_huge_page(struct page *page)
{
return split_huge_page_to_list(page, ((void *)0));
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long address, bool freeze, struct folio *folio);
# 208 "./include/linux/huge_mm.h"
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
unsigned long address);
# 222 "./include/linux/huge_mm.h"
int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
int advice);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int is_swap_pmd(pmd_t pmd)
{
return !pmd_none(pmd) && !pmd_present(pmd);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
return __pmd_trans_huge_lock(pmd, vma);
else
return ((void *)0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) spinlock_t *pud_trans_huge_lock(pud_t *pud,
struct vm_area_struct *vma)
{
if (pud_trans_huge(*pud) || pud_devmap(*pud))
return __pud_trans_huge_lock(pud, vma);
else
return ((void *)0);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_test_pmd_mappable(struct folio *folio)
{
return folio_order(folio) >= (21 -(12));
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_huge_zero_page(struct page *page)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_208(void) ; if (!((sizeof(huge_zero_page) == sizeof(char) || sizeof(huge_zero_page) == sizeof(short) || sizeof(huge_zero_page) == sizeof(int) || sizeof(huge_zero_page) == sizeof(long)) || sizeof(huge_zero_page) == sizeof(long long))) __compiletime_assert_208(); } while (0); (*(const volatile typeof( _Generic((huge_zero_page), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (huge_zero_page))) *)&(huge_zero_page)); }) == page;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_huge_zero_pmd(pmd_t pmd)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_209(void) ; if (!((sizeof(huge_zero_pfn) == sizeof(char) || sizeof(huge_zero_pfn) == sizeof(short) || sizeof(huge_zero_pfn) == sizeof(int) || sizeof(huge_zero_pfn) == sizeof(long)) || sizeof(huge_zero_pfn) == sizeof(long long))) __compiletime_assert_209(); } while (0); (*(const volatile typeof( _Generic((huge_zero_pfn), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (huge_zero_pfn))) *)&(huge_zero_pfn)); }) == pmd_pfn(pmd) && pmd_present(pmd);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_huge_zero_pud(pud_t pud)
{
return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool thp_migration_supported(void)
{
return 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct list_head *page_deferred_list(struct page *page)
{




return &page[2].deferred_list;
}
# 444 "./include/linux/huge_mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int split_folio_to_list(struct folio *folio,
struct list_head *list)
{
return split_huge_page_to_list(&folio->page, list);
}
# 701 "./include/linux/mm.h" 2
# 718 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int put_page_testzero(struct page *page)
{
do { if (__builtin_expect(!!(page_ref_count(page) == 0), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "page_ref_count(page) == 0"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/mm.h"), "i" (720), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0);
return page_ref_dec_and_test(page);
}
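/*
 * The guard above matches VM_BUG_ON_PAGE(page_ref_count(page) == 0, page):
 * with CONFIG_DEBUG_VM it dumps the page and then runs the same
 * ebreak/__bug_table sequence as BUG(). Several helpers below carry the
 * analogous VM_BUG_ON_FOLIO() expansion.
 */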

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int folio_put_testzero(struct folio *folio)
{
return put_page_testzero(&folio->page);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool get_page_unless_zero(struct page *page)
{
return page_ref_add_unless(page, 1, 0);
}

extern int page_is_ram(unsigned long pfn);

enum {
REGION_INTERSECTS,
REGION_DISJOINT,
REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
unsigned long desc);


struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);
# 767 "./include/linux/mm.h"
extern bool is_vmalloc_addr(const void *x);
extern int is_vmalloc_or_module_addr(const void *x);
# 786 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int folio_entire_mapcount(struct folio *folio)
{
do { if (__builtin_expect(!!(!folio_test_large(folio)), 0)) { dump_page(&folio->page, "VM_BUG_ON_FOLIO(" "!folio_test_large(folio)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/mm.h"), "i" (788), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0);
return atomic_read(folio_mapcount_ptr(folio)) + 1;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int compound_mapcount(struct page *page)
{
return folio_entire_mapcount((_Generic((page), const struct page *: (const struct folio *)_compound_head(page), struct page *: (struct folio *)_compound_head(page))));
}
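/*
 * The _Generic selection over const/non-const struct page * wrapped around
 * _compound_head(), here and in many helpers below, appears to be the
 * expansion of page_folio(page): it resolves to the head page reinterpreted
 * as a struct folio while preserving constness.
 */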






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_mapcount_reset(struct page *page)
{
atomic_set(&(page)->_mapcount, -1);
}

int __page_mapcount(struct page *page);
# 822 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int page_mapcount(struct page *page)
{
if (__builtin_expect(!!(PageCompound(page)), 0))
return __page_mapcount(page);
return atomic_read(&page->_mapcount) + 1;
}

int folio_mapcount(struct folio *folio);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int total_mapcount(struct page *page)
{
return folio_mapcount((_Generic((page), const struct page *: (const struct folio *)_compound_head(page), struct page *: (struct folio *)_compound_head(page))));
}
# 844 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *virt_to_head_page(const void *x)
{
struct page *page = ((((struct page *)((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) ))))))) + (((((({ unsigned long _x = (unsigned long)(x); ((_x) >= kernel_map.page_offset && (!1 || (_x) < kernel_map.page_offset + (((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2))) ? ((unsigned long)(_x) - kernel_map.va_pa_offset) : ({ unsigned long _y = _x; (0 && _y < kernel_map.virt_addr + 0) ? ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - 0); }); })) >> (12)))))));

return ((typeof(page))_compound_head(page));
}
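/*
 * The oversized initializer above appears to be riscv's virt_to_page(x)
 * under SPARSEMEM_VMEMMAP: the first parenthesized expression derives the
 * vmemmap base from kernel_map.page_offset, the second converts the virtual
 * address to a PFN via va_pa_offset (with the kernel-image and XIP cases
 * folded in), and indexing vmemmap by that PFN yields the struct page. The
 * readable form of the function would be compound_head(virt_to_page(x)).
 */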

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct folio *virt_to_folio(const void *x)
{
struct page *page = ((((struct page *)((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) ))))))) + (((((({ unsigned long _x = (unsigned long)(x); ((_x) >= kernel_map.page_offset && (!1 || (_x) < kernel_map.page_offset + (((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2))) ? ((unsigned long)(_x) - kernel_map.va_pa_offset) : ({ unsigned long _y = _x; (0 && _y < kernel_map.virt_addr + 0) ? ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - 0); }); })) >> (12)))))));

return (_Generic((page), const struct page *: (const struct folio *)_compound_head(page), struct page *: (struct folio *)_compound_head(page)));
}

void __put_page(struct page *page);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
void folio_copy(struct folio *dst, struct folio *src);

unsigned long nr_free_buffer_pages(void);






typedef void compound_page_dtor(struct page *);


enum compound_dtor_id {
NULL_COMPOUND_DTOR,
COMPOUND_PAGE_DTOR,




TRANSHUGE_PAGE_DTOR,

NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS];

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_compound_page_dtor(struct page *page,
enum compound_dtor_id compound_dtor)
{
do { if (__builtin_expect(!!(compound_dtor >= NR_COMPOUND_DTORS), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "compound_dtor >= NR_COMPOUND_DTORS"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/mm.h"), "i" (891), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0);
page[1].compound_dtor = compound_dtor;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void destroy_compound_page(struct page *page)
{
do { if (__builtin_expect(!!(page[1].compound_dtor >= NR_COMPOUND_DTORS), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "page[1].compound_dtor >= NR_COMPOUND_DTORS"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/mm.h"), "i" (897), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0);
compound_page_dtors[page[1].compound_dtor](page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int head_compound_pincount(struct page *head)
{
return atomic_read(compound_pincount_ptr(head));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_compound_order(struct page *page, unsigned int order)
{
page[1].compound_order = order;

page[1].compound_nr = 1U << order;

}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long compound_nr(struct page *page)
{
if (!PageHead(page))
return 1;

return page[1].compound_nr;



}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long page_size(struct page *page)
{
return ((1UL) << (12)) << compound_order(page);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int page_shift(struct page *page)
{
return (12) + compound_order(page);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int thp_order(struct page *page)
{
do { if (__builtin_expect(!!(PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/mm.h"), "i" (944), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0);
return compound_order(page);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int thp_nr_pages(struct page *page)
{
do { if (__builtin_expect(!!(PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PageTail(page)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/mm.h"), "i" (954), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0);
return compound_nr(page);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long thp_size(struct page *page)
{
return ((1UL) << (12)) << thp_order(page);
}

void free_compound_page(struct page *page);
# 978 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
if (__builtin_expect(!!(vma->vm_flags & 0x00000002), 1))
pte = pte_mkwrite(pte);
return pte;
}

vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);

vm_fault_t finish_fault(struct vm_fault *vmf);
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
# 1095 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) enum zone_type page_zonenum(const struct page *page)
{
do { kcsan_set_access_mask(((1UL << 2) - 1) << (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0))); __kcsan_check_access(&(page->flags), sizeof(page->flags), (1 << 3)); kcsan_set_access_mask(0); kcsan_atomic_next(1); } while (0);
return (page->flags >> (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0))) & ((1UL << 2) - 1);
}
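/*
 * The kcsan_set_access_mask()/__kcsan_check_access() pair above matches the
 * expansion of ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK <<
 * ZONES_PGSHIFT): a KCSAN assertion that no concurrent writer touches the
 * zone bits while they are read on the following line.
 */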

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) enum zone_type folio_zonenum(const struct folio *folio)
{
return page_zonenum(&folio->page);
}
# 1114 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_zone_device_page(const struct page *page)
{
return false;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_is_zone_device(const struct folio *folio)
{
return is_zone_device_page(&folio->page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_zone_movable_page(const struct page *page)
{
return page_zonenum(page) == ZONE_MOVABLE;
}
# 1144 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool put_devmap_managed_page(struct page *page)
{
return false;
}
# 1162 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_get(struct folio *folio)
{
do { if (__builtin_expect(!!(((unsigned int) folio_ref_count(folio) + 127u <= 127u)), 0)) { dump_page(&folio->page, "VM_BUG_ON_FOLIO(" "((unsigned int) folio_ref_count(folio) + 127u <= 127u)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/mm.h"), "i" (1164), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0);
folio_ref_inc(folio);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void get_page(struct page *page)
{
folio_get((_Generic((page), const struct page *: (const struct folio *)_compound_head(page), struct page *: (struct folio *)_compound_head(page))));
}

bool __attribute__((__warn_unused_result__)) try_grab_page(struct page *page, unsigned int flags);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__)) bool try_get_page(struct page *page)
{
page = ((typeof(page))_compound_head(page));
if (({ int __ret_warn_on = !!(page_ref_count(page) <= 0); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/mm.h"), "i" (1178), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }))
return false;
page_ref_inc(page);
return true;
}
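/*
 * The ({ int __ret_warn_on ... }) condition above appears to be the
 * expansion of WARN_ON_ONCE(page_ref_count(page) <= 0): the ebreak's
 * bug_entry carries BUGFLAG_WARNING | BUGFLAG_ONCE plus the TAINT_WARN
 * taint, so the trap handler warns and continues rather than panicking.
 */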
# 1197 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_put(struct folio *folio)
{
if (folio_put_testzero(folio))
__put_page(&folio->page);
}
# 1217 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_put_refs(struct folio *folio, int refs)
{
if (folio_ref_sub_and_test(folio, refs))
__put_page(&folio->page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_page(struct page *page)
{
struct folio *folio = (_Generic((page), const struct page *: (const struct folio *)_compound_head(page), struct page *: (struct folio *)_compound_head(page)));





if (put_devmap_managed_page(&folio->page))
return;
folio_put(folio);
}
# 1268 "./include/linux/mm.h"
void unpin_user_page(struct page *page);
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
bool make_dirty);
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
bool make_dirty);
void unpin_user_pages(struct page **pages, unsigned long npages);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_cow_mapping(vm_flags_t flags)
{
return (flags & (0x00000008 | 0x00000020)) == 0x00000020;
}
# 1292 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int page_zone_id(struct page *page)
{
return (page->flags >> ((((((sizeof(unsigned long)*8) - 0) - 0) < ((((sizeof(unsigned long)*8) - 0) - 0) - 2))? (((sizeof(unsigned long)*8) - 0) - 0) : ((((sizeof(unsigned long)*8) - 0) - 0) - 2)) * ((0 + 2) != 0))) & ((1UL << (0 + 2)) - 1);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int page_to_nid(const struct page *page)
{
struct page *p = (struct page *)page;

return (({ do { if (__builtin_expect(!!(PagePoisoned(p)), 0)) { dump_page(p, "VM_BUG_ON_PAGE(" "PagePoisoned(p)"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/mm.h"), "i" (1304), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0); p; })->flags >> ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0))) & ((1UL << 0) - 1);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int folio_nid(const struct folio *folio)
{
return page_to_nid(&folio->page);
}
# 1378 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int page_cpupid_xchg_last(struct page *page, int cpupid)
{
return page_to_nid(page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int page_cpupid_last(struct page *page)
{
return page_to_nid(page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpupid_to_nid(int cpupid)
{
return -1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpupid_to_pid(int cpupid)
{
return -1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpupid_to_cpu(int cpupid)
{
return -1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpu_pid_to_cpupid(int nid, int pid)
{
return -1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cpupid_pid_unset(int cpupid)
{
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_cpupid_reset_last(struct page *page)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
return false;
}
# 1467 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u8 page_kasan_tag(const struct page *page)
{
return 0xff;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_kasan_tag_set(struct page *page, u8 tag) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_kasan_tag_reset(struct page *page) { }



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct zone *page_zone(const struct page *page)
{
return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pg_data_t *page_pgdat(const struct page *page)
{
return NODE_DATA(page_to_nid(page));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct zone *folio_zone(const struct folio *folio)
{
return page_zone(&folio->page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pg_data_t *folio_pgdat(const struct folio *folio)
{
return page_pgdat(&folio->page);
}
# 1519 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long folio_pfn(struct folio *folio)
{
return (unsigned long)((&folio->page) - ((struct page *)((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) ))))))));
}
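/*
 * Annotation added in editing: folio_pfn() is page_to_pfn(&folio->page) in
 * the unexpanded source; the large constant expression appears to be
 * RISC-V's vmemmap base address (derived from kernel_map.page_offset and
 * the runtime pgtable_l4_enabled/pgtable_l5_enabled switches), so the pfn
 * is computed as (page - vmemmap).
 */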

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) atomic_t *folio_pincount_ptr(struct folio *folio)
{
return &((&(folio)->page) + (1))->compound_pincount;
}
# 1554 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_maybe_dma_pinned(struct folio *folio)
{
if (folio_test_large(folio))
return atomic_read(folio_pincount_ptr(folio)) > 0;
# 1567 "./include/linux/mm.h"
return ((unsigned int)folio_ref_count(folio)) >=
(1U << 10);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool page_maybe_dma_pinned(struct page *page)
{
return folio_maybe_dma_pinned((_Generic((page), const struct page *: (const struct folio *)_compound_head(page), struct page *: (struct folio *)_compound_head(page))));
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool page_needs_cow_for_dma(struct vm_area_struct *vma,
struct page *page)
{
if (!is_cow_mapping(vma->vm_flags))
return false;

if (!arch_test_bit(28, &vma->vm_mm->flags))
return false;

return page_maybe_dma_pinned(page);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_pinnable_page(struct page *page)
{
return !(is_zone_movable_page(page) || (get_pfnblock_flags_mask(page, (unsigned long)((page) - ((struct page *)((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) )))))))), ((1UL << 3) - 1)) == MIGRATE_CMA)) ||
is_zero_pfn((unsigned long)((page) - ((struct page *)((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) )))))))));
}
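/*
 * Annotation added in editing: the two repeated page-to-pfn blobs above are
 * the same vmemmap arithmetic as in folio_pfn(). Unexpanded (assuming a
 * ~v5.18 mm.h), this appears to be:
 *
 *     static inline bool is_pinnable_page(struct page *page)
 *     {
 *         return !(is_zone_movable_page(page) || is_migrate_cma_page(page)) ||
 *                is_zero_pfn(page_to_pfn(page));
 *     }
 *
 * with is_migrate_cma_page() visible as the
 * get_pfnblock_flags_mask(...) == MIGRATE_CMA test.
 */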







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_is_pinnable(struct folio *folio)
{
return is_pinnable_page(&folio->page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_page_zone(struct page *page, enum zone_type zone)
{
page->flags &= ~(((1UL << 2) - 1) << (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0)));
page->flags |= (zone & ((1UL << 2) - 1)) << (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_page_node(struct page *page, unsigned long node)
{
page->flags &= ~(((1UL << 0) - 1) << ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0)));
page->flags |= (node & ((1UL << 0) - 1)) << ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0));
}
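/*
 * Annotation added in editing: the mask/shift pairs above are the
 * page->flags layout macros, roughly:
 *
 *     page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
 *     page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
 *
 * In this configuration ZONES_WIDTH is 2 and NODES_WIDTH is 0, so
 * set_page_node() masks and stores zero bits, i.e. it compiles to nothing.
 */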

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_page_links(struct page *page, enum zone_type zone,
unsigned long node, unsigned long pfn)
{
set_page_zone(page, zone);
set_page_node(page, node);



}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long folio_nr_pages(struct folio *folio)
{
return compound_nr(&folio->page);
}
# 1658 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct folio *folio_next(struct folio *folio)
{
return (struct folio *)((&(folio)->page) + (folio_nr_pages(folio)));
}
# 1675 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int folio_shift(struct folio *folio)
{
return (12) + folio_order(folio);
}
# 1688 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) size_t folio_size(struct folio *folio)
{
return ((1UL) << (12)) << folio_order(folio);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int arch_make_page_accessible(struct page *page)
{
return 0;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int arch_make_folio_accessible(struct folio *folio)
{
int ret;
long i, nr = folio_nr_pages(folio);

for (i = 0; i < nr; i++) {
ret = arch_make_page_accessible(((&(folio)->page) + (i)));
if (ret)
break;
}

return ret;
}
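/*
 * Annotation added in editing: ((&(folio)->page) + (i)) appears to be the
 * expansion of folio_page(folio, i). Note `ret` is only assigned inside
 * the loop; this relies on folio_nr_pages() always being >= 1.
 */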






# 1 "./include/linux/vmstat.h" 1







# 1 "./include/linux/vm_event_item.h" 1
# 25 "./include/linux/vm_event_item.h"
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_MOVABLE,
ALLOCSTALL_DMA32, ALLOCSTALL_NORMAL, ALLOCSTALL_MOVABLE,
PGSCAN_SKIP_DMA32, PGSCAN_SKIP_NORMAL, PGSCAN_SKIP_MOVABLE,
PGFREE, PGACTIVATE, PGDEACTIVATE, PGLAZYFREE,
PGFAULT, PGMAJFAULT,
PGLAZYFREED,
PGREFILL,
PGREUSE,
PGSTEAL_KSWAPD,
PGSTEAL_DIRECT,
PGDEMOTE_KSWAPD,
PGDEMOTE_DIRECT,
PGSCAN_KSWAPD,
PGSCAN_DIRECT,
PGSCAN_DIRECT_THROTTLE,
PGSCAN_ANON,
PGSCAN_FILE,
PGSTEAL_ANON,
PGSTEAL_FILE,



PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL,
KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
PAGEOUTRUN, PGROTATED,
DROP_PAGECACHE, DROP_SLAB,
OOM_KILL,
# 61 "./include/linux/vm_event_item.h"
PGMIGRATE_SUCCESS, PGMIGRATE_FAIL,
THP_MIGRATION_SUCCESS,
THP_MIGRATION_FAIL,
THP_MIGRATION_SPLIT,


COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED,
COMPACTISOLATED,
COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
KCOMPACTD_WAKE,
KCOMPACTD_MIGRATE_SCANNED, KCOMPACTD_FREE_SCANNED,





CMA_ALLOC_SUCCESS,
CMA_ALLOC_FAIL,

UNEVICTABLE_PGCULLED,
UNEVICTABLE_PGSCANNED,
UNEVICTABLE_PGRESCUED,
UNEVICTABLE_PGMLOCKED,
UNEVICTABLE_PGMUNLOCKED,
UNEVICTABLE_PGCLEARED,
UNEVICTABLE_PGSTRANDED,

THP_FAULT_ALLOC,
THP_FAULT_FALLBACK,
THP_FAULT_FALLBACK_CHARGE,
THP_COLLAPSE_ALLOC,
THP_COLLAPSE_ALLOC_FAILED,
THP_FILE_ALLOC,
THP_FILE_FALLBACK,
THP_FILE_FALLBACK_CHARGE,
THP_FILE_MAPPED,
THP_SPLIT_PAGE,
THP_SPLIT_PAGE_FAILED,
THP_DEFERRED_SPLIT_PAGE,
THP_SPLIT_PMD,
THP_SCAN_EXCEED_NONE_PTE,
THP_SCAN_EXCEED_SWAP_PTE,
THP_SCAN_EXCEED_SHARED_PTE,



THP_ZERO_PAGE_ALLOC,
THP_ZERO_PAGE_ALLOC_FAILED,
THP_SWPOUT,
THP_SWPOUT_FALLBACK,


BALLOON_INFLATE,
BALLOON_DEFLATE,

BALLOON_MIGRATE,
# 130 "./include/linux/vm_event_item.h"
SWAP_RA,
SWAP_RA_HIT,
# 140 "./include/linux/vm_event_item.h"
NR_VM_EVENT_ITEMS
};
# 9 "./include/linux/vmstat.h" 2

# 1 "./include/linux/static_key.h" 1
# 11 "./include/linux/vmstat.h" 2


extern int sysctl_stat_interval;
# 24 "./include/linux/vmstat.h"
struct reclaim_stat {
unsigned nr_dirty;
unsigned nr_unqueued_dirty;
unsigned nr_congested;
unsigned nr_writeback;
unsigned nr_immediate;
unsigned nr_pageout;
unsigned nr_activate[2];
unsigned nr_ref_keep;
unsigned nr_unmap_fail;
unsigned nr_lazyfree_fail;
};

enum writeback_stat_item {
NR_DIRTY_THRESHOLD,
NR_DIRTY_BG_THRESHOLD,
NR_VM_WRITEBACK_STAT_ITEMS,
};
# 54 "./include/linux/vmstat.h"
struct vm_event_state {
unsigned long event[NR_VM_EVENT_ITEMS];
};

extern __attribute__((section(".data..percpu" ""))) __typeof__(struct vm_event_state) vm_event_states;





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __count_vm_event(enum vm_event_item item)
{
do { do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(vm_event_states.event[item])) { case 1: do { *({ do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item]))); (typeof((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0);break; case 2: do { *({ do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item]))); (typeof((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0);break; case 4: do { *({ do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item]))); (typeof((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0);break; case 8: do { *({ do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item]))); (typeof((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}
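/*
 * Annotation added in editing: the switch-on-sizeof machinery above is the
 * generic percpu accessor; the per-CPU slot is reached by adding
 * __per_cpu_offset[cpu] to the section address, with the CPU number taken
 * from thread_info. Unexpanded (assuming ~v5.18 include/linux/vmstat.h)
 * this is simply:
 *
 *     static inline void __count_vm_event(enum vm_event_item item)
 *     {
 *         raw_cpu_inc(vm_event_states.event[item]);
 *     }
 */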

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void count_vm_event(enum vm_event_item item)
{
do { do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(vm_event_states.event[item])) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item]))); (typeof((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item]))); (typeof((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item]))); (typeof((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item]))); (typeof((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __count_vm_events(enum vm_event_item item, long delta)
{
do { do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(vm_event_states.event[item])) { case 1: do { *({ do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item]))); (typeof((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += delta; } while (0);break; case 2: do { *({ do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item]))); (typeof((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += delta; } while (0);break; case 4: do { *({ do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item]))); (typeof((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += delta; } while (0);break; case 8: do { *({ do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item]))); (typeof((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += delta; } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void count_vm_events(enum vm_event_item item, long delta)
{
do { do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(vm_event_states.event[item])) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item]))); (typeof((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += delta; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item]))); (typeof((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += delta; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item]))); (typeof((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += delta; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item]))); (typeof((typeof(*(&(vm_event_states.event[item]))) *)(&(vm_event_states.event[item])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += delta; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}
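/*
 * Annotation added in editing: count_vm_event() and count_vm_events() are
 * the same accessors in their this_cpu_inc()/this_cpu_add() form, which on
 * riscv falls back to the generic implementation bracketed by
 * arch_local_irq_save()/arch_local_irq_restore(); __count_vm_events() is
 * raw_cpu_add(vm_event_states.event[item], delta).
 */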

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);
# 140 "./include/linux/vmstat.h"
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
extern atomic_long_t vm_numa_event[0];
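/*
 * Annotation added in editing: the zero-length array appears to be
 * NR_VM_NUMA_EVENT_ITEMS expanding to 0 with CONFIG_NUMA disabled; the
 * same constant shows up as the literal `0 +` terms in the vmstat_text
 * index arithmetic further down.
 */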
# 165 "./include/linux/vmstat.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void zone_page_state_add(long x, struct zone *zone,
enum zone_stat_item item)
{
atomic_long_add(x, &zone->vm_stat[item]);
atomic_long_add(x, &vm_zone_stat[item]);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void node_page_state_add(long x, struct pglist_data *pgdat,
enum node_stat_item item)
{
atomic_long_add(x, &pgdat->vm_stat[item]);
atomic_long_add(x, &vm_node_stat[item]);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long global_zone_page_state(enum zone_stat_item item)
{
long x = atomic_long_read(&vm_zone_stat[item]);

if (x < 0)
x = 0;

return x;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
unsigned long global_node_page_state_pages(enum node_stat_item item)
{
long x = atomic_long_read(&vm_node_stat[item]);

if (x < 0)
x = 0;

return x;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long global_node_page_state(enum node_stat_item item)
{
(void)({ int __ret_warn_on = !!(vmstat_item_in_bytes(item)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/vmstat.h"), "i" (202), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });

return global_node_page_state_pages(item);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long zone_page_state(struct zone *zone,
enum zone_stat_item item)
{
long x = atomic_long_read(&zone->vm_stat[item]);

if (x < 0)
x = 0;

return x;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long zone_page_state_snapshot(struct zone *zone,
enum zone_stat_item item)
{
long x = atomic_long_read(&zone->vm_stat[item]);


int cpu;
for (((cpu)) = -1; ((cpu)) = cpumask_next(((cpu)), (((const struct cpumask *)&__cpu_online_mask))), ((cpu)) < nr_cpu_ids;)
x += ({ do { const void *__vpp_verify = (typeof((zone->per_cpu_zonestats) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((zone->per_cpu_zonestats))) *)((zone->per_cpu_zonestats))); (typeof((typeof(*((zone->per_cpu_zonestats))) *)((zone->per_cpu_zonestats)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->vm_stat_diff[item];

if (x < 0)
x = 0;

return x;
}
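/*
 * Annotation added in editing: the for(...) header above is the expansion
 * of for_each_online_cpu(cpu), i.e. cpumask_next() iteration over
 * __cpu_online_mask, and the ({ ... }) expression is per_cpu_ptr(). The
 * unexpanded loop is roughly:
 *
 *     for_each_online_cpu(cpu)
 *         x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];
 */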
# 270 "./include/linux/vmstat.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fold_vm_numa_events(void)
{
}



void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
int (*calculate_pressure)(struct zone *));
# 418 "./include/linux/vmstat.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __zone_stat_mod_folio(struct folio *folio,
enum zone_stat_item item, long nr)
{
__mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __zone_stat_add_folio(struct folio *folio,
enum zone_stat_item item)
{
__mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __zone_stat_sub_folio(struct folio *folio,
enum zone_stat_item item)
{
__mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void zone_stat_mod_folio(struct folio *folio,
enum zone_stat_item item, long nr)
{
mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void zone_stat_add_folio(struct folio *folio,
enum zone_stat_item item)
{
mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void zone_stat_sub_folio(struct folio *folio,
enum zone_stat_item item)
{
mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __node_stat_mod_folio(struct folio *folio,
enum node_stat_item item, long nr)
{
__mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __node_stat_add_folio(struct folio *folio,
enum node_stat_item item)
{
__mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __node_stat_sub_folio(struct folio *folio,
enum node_stat_item item)
{
__mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void node_stat_mod_folio(struct folio *folio,
enum node_stat_item item, long nr)
{
mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void node_stat_add_folio(struct folio *folio,
enum node_stat_item item)
{
mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void node_stat_sub_folio(struct folio *folio,
enum node_stat_item item)
{
mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
int migratetype)
{
__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
if (__builtin_expect(!!((migratetype) == MIGRATE_CMA), 0))
__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

extern const char * const vmstat_text[];

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *zone_stat_name(enum zone_stat_item item)
{
return vmstat_text[item];
}
# 513 "./include/linux/vmstat.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *node_stat_name(enum node_stat_item item)
{
return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
0 +
item];
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *lru_list_name(enum lru_list lru)
{
return node_stat_name(NR_LRU_BASE + lru) + 3;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *writeback_stat_name(enum writeback_stat_item item)
{
return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
0 +
NR_VM_NODE_STAT_ITEMS +
item];
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *vm_event_name(enum vm_event_item item)
{
return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
0 +
NR_VM_NODE_STAT_ITEMS +
NR_VM_WRITEBACK_STAT_ITEMS +
item];
}
# 574 "./include/linux/vmstat.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __mod_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx, int val)
{
__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mod_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx, int val)
{
mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __mod_lruvec_page_state(struct page *page,
enum node_stat_item idx, int val)
{
__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mod_lruvec_page_state(struct page *page,
enum node_stat_item idx, int val)
{
mod_node_page_state(page_pgdat(page), idx, val);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __inc_lruvec_page_state(struct page *page,
enum node_stat_item idx)
{
__mod_lruvec_page_state(page, idx, 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __dec_lruvec_page_state(struct page *page,
enum node_stat_item idx)
{
__mod_lruvec_page_state(page, idx, -1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __lruvec_stat_mod_folio(struct folio *folio,
enum node_stat_item idx, int val)
{
__mod_lruvec_page_state(&folio->page, idx, val);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __lruvec_stat_add_folio(struct folio *folio,
enum node_stat_item idx)
{
__lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __lruvec_stat_sub_folio(struct folio *folio,
enum node_stat_item idx)
{
__lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inc_lruvec_page_state(struct page *page,
enum node_stat_item idx)
{
mod_lruvec_page_state(page, idx, 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dec_lruvec_page_state(struct page *page,
enum node_stat_item idx)
{
mod_lruvec_page_state(page, idx, -1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void lruvec_stat_mod_folio(struct folio *folio,
enum node_stat_item idx, int val)
{
mod_lruvec_page_state(&folio->page, idx, val);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void lruvec_stat_add_folio(struct folio *folio,
enum node_stat_item idx)
{
lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void lruvec_stat_sub_folio(struct folio *folio,
enum node_stat_item idx)
{
lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}
# 1720 "./include/linux/mm.h" 2

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void *lowmem_page_address(const struct page *page)
{
return ((((void *)((void *)((unsigned long)((phys_addr_t)((((phys_addr_t)((unsigned long)((page) - ((struct page *)((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) ))))))))) << (12))))) + kernel_map.va_pa_offset)))));
}
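/*
 * Annotation added in editing: this appears to be page_to_virt(page) on
 * RISC-V -- (page - vmemmap) recovers the pfn, << PAGE_SHIFT turns it into
 * a physical address, and adding kernel_map.va_pa_offset maps that into
 * the linear region. Unexpanded:
 *
 *     static __always_inline void *lowmem_page_address(const struct page *page)
 *     {
 *         return page_to_virt(page);
 *     }
 */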
# 1754 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *folio_address(const struct folio *folio)
{
return lowmem_page_address(&folio->page);
}

extern void *page_rmapping(struct page *page);
extern unsigned long __page_file_index(struct page *page);





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long page_index(struct page *page)
{
if (__builtin_expect(!!(PageSwapCache(page)), 0))
return __page_file_index(page);
return page->index;
}

bool page_mapped(struct page *page);
bool folio_mapped(struct folio *folio);






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool page_is_pfmemalloc(const struct page *page)
{





return (uintptr_t)page->lru.next & ((((1UL))) << (1));
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_page_pfmemalloc(struct page *page)
{
page->lru.next = (void *)((((1UL))) << (1));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_page_pfmemalloc(struct page *page)
{
page->lru.next = ((void *)0);
}




extern void pagefault_out_of_memory(void);
# 1820 "./include/linux/mm.h"
extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);


extern bool can_do_mlock(void);



extern int user_shm_lock(size_t, struct ucounts *);
extern void user_shm_unlock(size_t, struct ucounts *);

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd);

void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
unsigned long start, unsigned long end);

struct mmu_notifier_range;

void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
struct mmu_notifier_range *range, pte_t **ptepp,
pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pte(struct mm_struct *mm, unsigned long address,
pte_t **ptepp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int generic_error_remove_page(struct address_space *mapping, struct page *page);


extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
struct pt_regs *regs);
extern int fixup_user_fault(struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked);
void unmap_mapping_pages(struct address_space *mapping,
unsigned long start, unsigned long nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
# 1899 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void unmap_shared_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen)
{
unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags);

long get_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked);
long pin_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked);
long get_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas);
long pin_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);

int get_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
int pin_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);

int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
struct task_struct *task, bool bypass_rlim);

struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
struct page **pages);
struct page *get_dump_page(unsigned long addr);

bool folio_mark_dirty(struct folio *folio);
bool set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len,
bool need_rmap_locks);
# 1972 "./include/linux/mm.h"
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
unsigned long end, pgprot_t newprot,
unsigned long cp_flags);
extern int mprotect_fixup(struct vm_area_struct *vma,
struct vm_area_struct **pprev, unsigned long start,
unsigned long end, unsigned long newflags);




int get_user_pages_fast_only(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
int pin_user_pages_fast_only(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool get_user_page_fast_only(unsigned long addr,
unsigned int gup_flags, struct page **pagep)
{
return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
long val = atomic_long_read(&mm->rss_stat.count[member]);






if (val < 0)
val = 0;

return (unsigned long)val;
}

void mm_trace_rss_stat(struct mm_struct *mm, int member, long count);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void add_mm_counter(struct mm_struct *mm, int member, long value)
{
long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);

mm_trace_rss_stat(mm, member, count);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inc_mm_counter(struct mm_struct *mm, int member)
{
long count = atomic_long_inc_return(&mm->rss_stat.count[member]);

mm_trace_rss_stat(mm, member, count);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dec_mm_counter(struct mm_struct *mm, int member)
{
long count = atomic_long_dec_return(&mm->rss_stat.count[member]);

mm_trace_rss_stat(mm, member, count);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mm_counter_file(struct page *page)
{
if (PageSwapBacked(page))
return MM_SHMEMPAGES;
return MM_FILEPAGES;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mm_counter(struct page *page)
{
if (PageAnon(page))
return MM_ANONPAGES;
return mm_counter_file(page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long get_mm_rss(struct mm_struct *mm)
{
return get_mm_counter(mm, MM_FILEPAGES) +
get_mm_counter(mm, MM_ANONPAGES) +
get_mm_counter(mm, MM_SHMEMPAGES);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
return __builtin_choose_expr(((!!(sizeof((typeof(mm->hiwater_rss) *)1 == (typeof(get_mm_rss(mm)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(mm->hiwater_rss) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(get_mm_rss(mm)) * 0l)) : (int *)8))))), ((mm->hiwater_rss) > (get_mm_rss(mm)) ? (mm->hiwater_rss) : (get_mm_rss(mm))), ({ typeof(mm->hiwater_rss) __UNIQUE_ID___x210 = (mm->hiwater_rss); typeof(get_mm_rss(mm)) __UNIQUE_ID___y211 = (get_mm_rss(mm)); ((__UNIQUE_ID___x210) > (__UNIQUE_ID___y211) ? (__UNIQUE_ID___x210) : (__UNIQUE_ID___y211)); }));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
return __builtin_choose_expr(((!!(sizeof((typeof(mm->hiwater_vm) *)1 == (typeof(mm->total_vm) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(mm->hiwater_vm) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(mm->total_vm) * 0l)) : (int *)8))))), ((mm->hiwater_vm) > (mm->total_vm) ? (mm->hiwater_vm) : (mm->total_vm)), ({ typeof(mm->hiwater_vm) __UNIQUE_ID___x212 = (mm->hiwater_vm); typeof(mm->total_vm) __UNIQUE_ID___y213 = (mm->total_vm); ((__UNIQUE_ID___x212) > (__UNIQUE_ID___y213) ? (__UNIQUE_ID___x212) : (__UNIQUE_ID___y213)); }));
}
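/*
 * Annotation added in editing: the __builtin_choose_expr/__UNIQUE_ID forms
 * in the two functions above are the kernel's type-checking max() macro;
 * unexpanded they are just:
 *
 *     return max(mm->hiwater_rss, get_mm_rss(mm));
 *     return max(mm->hiwater_vm, mm->total_vm);
 */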

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void update_hiwater_rss(struct mm_struct *mm)
{
unsigned long _rss = get_mm_rss(mm);

if ((mm)->hiwater_rss < _rss)
(mm)->hiwater_rss = _rss;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void update_hiwater_vm(struct mm_struct *mm)
{
if (mm->hiwater_vm < mm->total_vm)
mm->hiwater_vm = mm->total_vm;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void reset_mm_hiwater_rss(struct mm_struct *mm)
{
mm->hiwater_rss = get_mm_rss(mm);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void setmax_mm_hiwater_rss(unsigned long *maxrss,
struct mm_struct *mm)
{
unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

if (*maxrss < hiwater_rss)
*maxrss = hiwater_rss;
}


void sync_mm_rss(struct mm_struct *mm);
# 2114 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pte_devmap(pte_t pte)
{
return 0;
}


int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl)
{
pte_t *ptep;
(ptep = __get_locked_pte(mm, addr, ptl));
return ptep;
}
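/*
 * Annotation added in editing: the bare parenthesised assignment above
 * appears to be what remains of
 * __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl)) once the
 * sparse-only annotation compiles away outside __CHECKER__ builds.
 */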
# 2139 "./include/linux/mm.h"
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
# 2152 "./include/linux/mm.h"
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mm_inc_nr_puds(struct mm_struct *mm)
{
if (mm_pud_folded(mm))
return;
atomic_long_add((((1UL) << (12)) / sizeof(pud_t)) * sizeof(pud_t), &mm->pgtables_bytes);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mm_dec_nr_puds(struct mm_struct *mm)
{
if (mm_pud_folded(mm))
return;
atomic_long_sub((((1UL) << (12)) / sizeof(pud_t)) * sizeof(pud_t), &mm->pgtables_bytes);
}
# 2180 "./include/linux/mm.h"
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mm_inc_nr_pmds(struct mm_struct *mm)
{
if (0)
return;
atomic_long_add((((1UL) << (12)) / sizeof(pmd_t)) * sizeof(pmd_t), &mm->pgtables_bytes);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mm_dec_nr_pmds(struct mm_struct *mm)
{
if (0)
return;
atomic_long_sub((((1UL) << (12)) / sizeof(pmd_t)) * sizeof(pmd_t), &mm->pgtables_bytes);
}
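/*
 * Annotation added in editing: the `if (0)` in both helpers appears to be
 * mm_pmd_folded(mm) evaluating to a constant 0 on riscv64 (the PMD level
 * is never folded there, unlike the PUD level, whose folding is the
 * runtime mm_pud_folded() check kept in mm_inc/dec_nr_puds() above), so
 * the early return is dead code.
 */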



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mm_pgtables_bytes_init(struct mm_struct *mm)
{
atomic_long_set(&mm->pgtables_bytes, 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
return atomic_long_read(&mm->pgtables_bytes);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mm_inc_nr_ptes(struct mm_struct *mm)
{
atomic_long_add((((1UL) << (12)) / sizeof(pte_t)) * sizeof(pte_t), &mm->pgtables_bytes);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mm_dec_nr_ptes(struct mm_struct *mm)
{
atomic_long_sub((((1UL) << (12)) / sizeof(pte_t)) * sizeof(pte_t), &mm->pgtables_bytes);
}
# 2229 "./include/linux/mm.h"
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
int __pte_alloc_kernel(pmd_t *pmd);



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
unsigned long address)
{
return (__builtin_expect(!!(pgd_none(*pgd)), 0) && __p4d_alloc(mm, pgd, address)) ?
((void *)0) : p4d_offset(pgd, address);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
unsigned long address)
{
return (__builtin_expect(!!(p4d_none(*p4d)), 0) && __pud_alloc(mm, p4d, address)) ?
((void *)0) : pud_offset(p4d, address);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
return (__builtin_expect(!!(pud_none(*pud)), 0) && __pmd_alloc(mm, pud, address))?
((void *)0): pmd_offset(pud, address);
}




void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) spinlock_t *ptlock_ptr(struct page *page)
{
return page->ptl;
}
# 2285 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
return ptlock_ptr(pmd_page(*pmd));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ptlock_init(struct page *page)
{







do { if (__builtin_expect(!!(*(unsigned long *)&page->ptl), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "*(unsigned long *)&page->ptl"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/mm.h"), "i" (2299), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0);
if (!ptlock_alloc(page))
return false;
do { static struct lock_class_key __key; __raw_spin_lock_init(spinlock_check(ptlock_ptr(page)), "ptlock_ptr(page)", &__key, LD_WAIT_CONFIG); } while (0);
return true;
}
# 2319 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pgtable_init(void)
{
ptlock_cache_init();
pgtable_cache_init();
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool pgtable_pte_page_ctor(struct page *page)
{
if (!ptlock_init(page))
return false;
__SetPageTable(page);
inc_lruvec_page_state(page, NR_PAGETABLE);
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pgtable_pte_page_dtor(struct page *page)
{
ptlock_free(page);
__ClearPageTable(page);
dec_lruvec_page_state(page, NR_PAGETABLE);
}
# 2370 "./include/linux/mm.h"
static struct page *pmd_to_page(pmd_t *pmd)
{
unsigned long mask = ~((((1UL) << (12)) / sizeof(pmd_t)) * sizeof(pmd_t) - 1);
return ((((struct page *)((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) ))))))) + (((((({ unsigned long _x = (unsigned long)((void *)((unsigned long) pmd & mask)); ((_x) >= kernel_map.page_offset && (!1 || (_x) < kernel_map.page_offset + (((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2))) ? ((unsigned long)(_x) - kernel_map.va_pa_offset) : ({ unsigned long _y = _x; (0 && _y < kernel_map.virt_addr + 0) ? ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - 0); }); })) >> (12)))))));
}
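/*
 * Annotation added in editing: this is virt_to_page() applied to the
 * PMD-table-aligned address; the conditional inside the ({ ... }) block
 * distinguishes linear-map addresses from kernel-image addresses before
 * converting to a pfn and indexing vmemmap. Unexpanded:
 *
 *     unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
 *     return virt_to_page((void *)((unsigned long) pmd & mask));
 */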

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
return ptlock_ptr(pmd_to_page(pmd));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool pmd_ptlock_init(struct page *page)
{

page->pmd_huge_pte = ((void *)0);

return ptlock_init(page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pmd_ptlock_free(struct page *page)
{

do { if (__builtin_expect(!!(page->pmd_huge_pte), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "page->pmd_huge_pte"")"); do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/mm.h"), "i" (2392), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } } while (0);

ptlock_free(page);
}
# 2413 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
spinlock_t *ptl = pmd_lockptr(mm, pmd);
spin_lock(ptl);
return ptl;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool pgtable_pmd_page_ctor(struct page *page)
{
if (!pmd_ptlock_init(page))
return false;
__SetPageTable(page);
inc_lruvec_page_state(page, NR_PAGETABLE);
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pgtable_pmd_page_dtor(struct page *page)
{
pmd_ptlock_free(page);
__ClearPageTable(page);
dec_lruvec_page_state(page, NR_PAGETABLE);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
{
return &mm->page_table_lock;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
{
spinlock_t *ptl = pud_lockptr(mm, pud);

spin_lock(ptl);
return ptl;
}

extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) pagecache_init(void);
extern void free_initmem(void);







extern unsigned long free_reserved_area(void *start, void *end,
int poison, const char *s);

extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(void);

extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void free_reserved_page(struct page *page)
{
ClearPageReserved(page);
init_page_count(page);
__free_pages((page), 0);
adjust_managed_page_count(page, 1);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mark_page_reserved(struct page *page)
{
SetPageReserved(page);
adjust_managed_page_count(page, -1);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long free_initmem_default(int poison)
{
extern char __init_begin[], __init_end[];

return free_reserved_area(&__init_begin, &__init_end,
poison, "unused kernel image (initmem)");
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long get_num_physpages(void)
{
int nid;
unsigned long phys_pages = 0;
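/*
 * for_each_online_node() collapsed to a single nid == 0 pass: this
 * configuration builds without NUMA, so MAX_NUMNODES == 1.
 */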

for ( (nid) = 0; (nid) == 0; (nid) = 1)
phys_pages += (NODE_DATA(nid)->node_present_pages);

return phys_pages;
}
# 2529 "./include/linux/mm.h"
void free_area_init(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int early_pfn_to_nid(unsigned long pfn)
{
return 0;
}





extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_range(unsigned long, int, unsigned long,
unsigned long, unsigned long, enum meminit_context,
struct vmem_altmap *, int migratetype);
extern void setup_per_zone_wmarks(void);
extern void calculate_min_free_kbytes(void);
extern int __attribute__((__section__(".meminit.text"))) __attribute__((__cold__)) __attribute__((patchable_function_entry(0, 0))) init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) mmap_init(void);
extern void show_mem(unsigned int flags, nodemask_t *nodemask);
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);




extern __attribute__((__format__(printf, 3, 4)))
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);


extern int min_free_kbytes;
extern int watermark_boost_factor;
extern int watermark_scale_factor;
extern bool arch_has_descending_max_zone_pfns(void);


extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);


void vma_interval_tree_insert(struct vm_area_struct *node,
struct rb_root_cached *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
struct vm_area_struct *prev,
struct rb_root_cached *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
struct rb_root_cached *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
unsigned long start, unsigned long last);





void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
struct rb_root_cached *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
struct rb_root_cached *root);
struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
struct anon_vma_chain *node, unsigned long start, unsigned long last);
# 2616 "./include/linux/mm.h"
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
unsigned long end, unsigned long pgoff, struct vm_area_struct *insert,
struct vm_area_struct *expand);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int vma_adjust(struct vm_area_struct *vma, unsigned long start,
unsigned long end, unsigned long pgoff, struct vm_area_struct *insert)
{
return __vma_adjust(vma, start, end, pgoff, insert, ((void *)0));
}
extern struct vm_area_struct *vma_merge(struct mm_struct *,
struct vm_area_struct *prev, unsigned long addr, unsigned long end,
unsigned long vm_flags, struct anon_vma *, struct file *, unsigned long,
struct mempolicy *, struct vm_userfaultfd_ctx, struct anon_vma_name *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
unsigned long addr, int new_below);
extern int split_vma(struct mm_struct *, struct vm_area_struct *,
unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
unsigned long addr, unsigned long len, unsigned long pgoff,
bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int check_data_rlimit(unsigned long rlim,
unsigned long new,
unsigned long start,
unsigned long end_data,
unsigned long start_data)
{
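/* Errno macros are expanded to literals here: -28 is -ENOSPC. */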
if (rlim < (~0UL)) {
if (((new - start) + (end_data - start_data)) > rlim)
return -28;
}

return 0;
}

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);

extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);

extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
const struct vm_special_mapping *sm);
extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long flags,
const struct vm_special_mapping *spec);

extern int install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long flags, struct page **pages);

unsigned long randomize_stack_top(unsigned long stack_top);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
unsigned long pgoff, unsigned long *populate, struct list_head *uf);
extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf, bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);


extern int __mm_populate(unsigned long addr, unsigned long len,
int ignore_errors);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mm_populate(unsigned long addr, unsigned long len)
{

(void) __mm_populate(addr, len, 1);
}





extern int __attribute__((__warn_unused_result__)) vm_brk(unsigned long, unsigned long);
extern int __attribute__((__warn_unused_result__)) vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __attribute__((__warn_unused_result__)) vm_mmap(struct file *, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);

struct vm_unmapped_area_info {

unsigned long flags;
unsigned long length;
unsigned long low_limit;
unsigned long high_limit;
unsigned long align_mask;
unsigned long align_offset;
};

extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);


extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);


extern vm_fault_t filemap_fault(struct vm_fault *vmf);
extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
unsigned long start_pgoff, unsigned long end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);

extern unsigned long stack_guard_gap;

extern int expand_stack(struct vm_area_struct *vma, unsigned long address);


extern int expand_downwards(struct vm_area_struct *vma,
unsigned long address);







extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev);
# 2766 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
unsigned long start_addr,
unsigned long end_addr)
{
struct vm_area_struct *vma = find_vma(mm, start_addr);

if (vma && end_addr <= vma->vm_start)
vma = ((void *)0);
return vma;
}
# 2785 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma = find_vma(mm, addr);

if (vma && addr < vma->vm_start)
vma = ((void *)0);

return vma;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long vm_start_gap(struct vm_area_struct *vma)
{
unsigned long vm_start = vma->vm_start;
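/* 0x00000100 is VM_GROWSDOWN; an underflow of the guard gap clamps to 0. */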

if (vma->vm_flags & 0x00000100) {
vm_start -= stack_guard_gap;
if (vm_start > vma->vm_start)
vm_start = 0;
}
return vm_start;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long vm_end_gap(struct vm_area_struct *vma)
{
unsigned long vm_end = vma->vm_end;
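/*
 * VM_GROWSUP expands to 0x00000000 on riscv, so the branch below is
 * dead code on this architecture.
 */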

if (vma->vm_flags & 0x00000000) {
vm_end += stack_guard_gap;
if (vm_end < vma->vm_end)
vm_end = -((1UL) << (12));
}
return vm_end;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long vma_pages(struct vm_area_struct *vma)
{
return (vma->vm_end - vma->vm_start) >> (12);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
unsigned long vm_start, unsigned long vm_end)
{
struct vm_area_struct *vma = find_vma(mm, vm_start);

if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
vma = ((void *)0);

return vma;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool range_in_vma(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
return (vma && vma->vm_start <= start && end <= vma->vm_end);
}


pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
# 2857 "./include/linux/mm.h"
void vma_set_file(struct vm_area_struct *vma, struct file *file);






struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
struct page **pages, unsigned long *num);
int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
unsigned long num);
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
unsigned long num);
vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn);
vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
unsigned long addr, struct page *page)
{
int err = vm_insert_page(vma, addr, page);

if (err == -12)
return VM_FAULT_OOM;
if (err < 0 && err != -16)
return VM_FAULT_SIGBUS;

return VM_FAULT_NOPAGE;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int io_remap_pfn_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn,
unsigned long size, pgprot_t prot)
{
return remap_pfn_range(vma, addr, pfn, size, (prot));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) vm_fault_t vmf_error(int err)
{
if (err == -12)
return VM_FAULT_OOM;
return VM_FAULT_SIGBUS;
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
unsigned int foll_flags);
# 2996 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
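/* -12 is -ENOMEM, -133 is -EHWPOISON, -14 is -EFAULT; 0x100 is FOLL_HWPOISON. */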
if (vm_fault & VM_FAULT_OOM)
return -12;
if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
return (foll_flags & 0x100) ? -133 : -14;
if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
return -14;
return 0;
}

typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);
extern int apply_to_existing_page_range(struct mm_struct *mm,
unsigned long address, unsigned long size,
pte_fn_t fn, void *data);

extern void init_mem_debugging_and_hardening(void);
# 3043 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool page_poisoning_enabled(void) { return false; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool page_poisoning_enabled_static(void) { return false; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __kernel_poison_pages(struct page *page, int numpages) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kernel_poison_pages(struct page *page, int numpages) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kernel_unpoison_pages(struct page *page, int numpages) { }


extern struct static_key_false init_on_alloc;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool want_init_on_alloc(gfp_t flags)
{
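/*
 * static_branch_maybe() expanded: CONFIG_JUMP_LABEL appears to be off
 * in this config, so the branch reduces to a static_key_count() test
 * wrapped in __builtin_expect(), plus compile-time key type checking.
 */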
if ((0 ? __builtin_expect(!!(({ if (!__builtin_types_compatible_p(typeof(*&(&init_on_alloc)->key), struct static_key) && !__builtin_types_compatible_p(typeof(*&(&init_on_alloc)->key), struct static_key_true) && !__builtin_types_compatible_p(typeof(*&(&init_on_alloc)->key), struct static_key_false)) ____wrong_branch_error(); static_key_count((struct static_key *)&(&init_on_alloc)->key) > 0; })), 1) : __builtin_expect(!!(({ if (!__builtin_types_compatible_p(typeof(*&(&init_on_alloc)->key), struct static_key) && !__builtin_types_compatible_p(typeof(*&(&init_on_alloc)->key), struct static_key_true) && !__builtin_types_compatible_p(typeof(*&(&init_on_alloc)->key), struct static_key_false)) ____wrong_branch_error(); static_key_count((struct static_key *)&(&init_on_alloc)->key) > 0; })), 0)))

return true;
return flags & (( gfp_t)0x100u);
}

extern struct static_key_false init_on_free;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool want_init_on_free(void)
{
return (0 ? __builtin_expect(!!(({ if (!__builtin_types_compatible_p(typeof(*&(&init_on_free)->key), struct static_key) && !__builtin_types_compatible_p(typeof(*&(&init_on_free)->key), struct static_key_true) && !__builtin_types_compatible_p(typeof(*&(&init_on_free)->key), struct static_key_false)) ____wrong_branch_error(); static_key_count((struct static_key *)&(&init_on_free)->key) > 0; })), 1) : __builtin_expect(!!(({ if (!__builtin_types_compatible_p(typeof(*&(&init_on_free)->key), struct static_key) && !__builtin_types_compatible_p(typeof(*&(&init_on_free)->key), struct static_key_true) && !__builtin_types_compatible_p(typeof(*&(&init_on_free)->key), struct static_key_false)) ____wrong_branch_error(); static_key_count((struct static_key *)&(&init_on_free)->key) > 0; })), 0));

}

extern bool _debug_pagealloc_enabled_early;
extern struct static_key_false _debug_pagealloc_enabled;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool debug_pagealloc_enabled(void)
{
return 1 &&
_debug_pagealloc_enabled_early;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool debug_pagealloc_enabled_static(void)
{
if (!1)
return false;

return __builtin_expect(!!(({ if (!__builtin_types_compatible_p(typeof(*&(&_debug_pagealloc_enabled)->key), struct static_key) && !__builtin_types_compatible_p(typeof(*&(&_debug_pagealloc_enabled)->key), struct static_key_true) && !__builtin_types_compatible_p(typeof(*&(&_debug_pagealloc_enabled)->key), struct static_key_false)) ____wrong_branch_error(); static_key_count((struct static_key *)&(&_debug_pagealloc_enabled)->key) > 0; })), 0);
}






extern void __kernel_map_pages(struct page *page, int numpages, int enable);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void debug_pagealloc_map_pages(struct page *page, int numpages)
{
if (debug_pagealloc_enabled_static())
__kernel_map_pages(page, numpages, 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void debug_pagealloc_unmap_pages(struct page *page, int numpages)
{
if (debug_pagealloc_enabled_static())
__kernel_map_pages(page, numpages, 0);
}
# 3115 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
return ((void *)0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
return 0;
}


extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);


extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *,
loff_t *);


void drop_slab(void);




extern int randomize_va_space;


const char * arch_vma_name(struct vm_area_struct *vma);

void print_vma_addr(char *prefix, unsigned long rip);
# 3158 "./include/linux/mm.h"
void *sparse_buffer_alloc(unsigned long size);
struct page * __populate_section_memmap(unsigned long pfn,
unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
struct vmem_altmap *altmap);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *vmemmap_alloc_block_buf(unsigned long size, int node,
struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
int node, struct vmem_altmap *altmap);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);




void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
unsigned long nr_pages);

enum mf_flags {
MF_COUNT_INCREASED = 1 << 0,
MF_ACTION_REQUIRED = 1 << 1,
MF_MUST_KILL = 1 << 2,
MF_SOFT_OFFLINE = 1 << 3,
MF_UNPOISON = 1 << 4,
};
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue(unsigned long pfn, int flags);
extern void memory_failure_queue_kick(int cpu);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p);
extern atomic_long_t num_poisoned_pages;
extern int soft_offline_page(unsigned long pfn, int flags);



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
{
return 0;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int arch_memory_failure(unsigned long pfn, int flags)
{
return -6;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool arch_is_platform_page(u64 paddr)
{
return false;
}





enum mf_result {
MF_IGNORED,
MF_FAILED,
MF_DELAYED,
MF_RECOVERED,
};

enum mf_action_page_type {
MF_MSG_KERNEL,
MF_MSG_KERNEL_HIGH_ORDER,
MF_MSG_SLAB,
MF_MSG_DIFFERENT_COMPOUND,
MF_MSG_HUGE,
MF_MSG_FREE_HUGE,
MF_MSG_NON_PMD_HUGE,
MF_MSG_UNMAP_FAILED,
MF_MSG_DIRTY_SWAPCACHE,
MF_MSG_CLEAN_SWAPCACHE,
MF_MSG_DIRTY_MLOCKED_LRU,
MF_MSG_CLEAN_MLOCKED_LRU,
MF_MSG_DIRTY_UNEVICTABLE_LRU,
MF_MSG_CLEAN_UNEVICTABLE_LRU,
MF_MSG_DIRTY_LRU,
MF_MSG_CLEAN_LRU,
MF_MSG_TRUNCATED_LRU,
MF_MSG_BUDDY,
MF_MSG_DAX,
MF_MSG_UNSPLIT_THP,
MF_MSG_DIFFERENT_PAGE_SIZE,
MF_MSG_UNKNOWN,
};


extern void clear_huge_page(struct page *page,
unsigned long addr_hint,
unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
unsigned long addr_hint,
struct vm_area_struct *vma,
unsigned int pages_per_huge_page);
extern long copy_huge_page_from_user(struct page *dst_page,
const void *usr_src,
unsigned int pages_per_huge_page,
bool allow_pagefault);
# 3281 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool vma_is_special_huge(const struct vm_area_struct *vma)
{
return vma_is_dax(vma) || (vma->vm_file &&
(vma->vm_flags & (0x00000400 | 0x10000000)));
}




extern unsigned int _debug_guardpage_minorder;
extern struct static_key_false _debug_guardpage_enabled;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int debug_guardpage_minorder(void)
{
return _debug_guardpage_minorder;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool debug_guardpage_enabled(void)
{
return __builtin_expect(!!(({ if (!__builtin_types_compatible_p(typeof(*&(&_debug_guardpage_enabled)->key), struct static_key) && !__builtin_types_compatible_p(typeof(*&(&_debug_guardpage_enabled)->key), struct static_key_true) && !__builtin_types_compatible_p(typeof(*&(&_debug_guardpage_enabled)->key), struct static_key_false)) ____wrong_branch_error(); static_key_count((struct static_key *)&(&_debug_guardpage_enabled)->key) > 0; })), 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool page_is_guard(struct page *page)
{
if (!debug_guardpage_enabled())
return false;

return PageGuard(page);
}
# 3319 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void setup_nr_node_ids(void) {}


extern int memcmp_pages(struct page *page1, struct page *page2);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pages_identical(struct page *page1, struct page *page2)
{
return !memcmp_pages(page1, page2);
}
# 3341 "./include/linux/mm.h"
extern int sysctl_nr_trim_pages;


void mem_dump_obj(void *object);
# 3357 "./include/linux/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int seal_check_future_write(int seals, struct vm_area_struct *vma)
{
if (seals & 0x0010) {




if ((vma->vm_flags & 0x00000008) && (vma->vm_flags & 0x00000002))
return -1;
# 3374 "./include/linux/mm.h"
if (vma->vm_flags & 0x00000008)
vma->vm_flags &= ~(0x00000020);
}

return 0;
}


int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
unsigned long len_in,
struct anon_vma_name *anon_name);
# 25 "./include/linux/net.h" 2
# 1 "./include/linux/sockptr.h" 1
# 14 "./include/linux/sockptr.h"
typedef struct {
union {
void *kernel;
void *user;
};
bool is_kernel : 1;
} sockptr_t;
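/*
 * sockptr_t holds either a kernel or a user pointer; is_kernel selects
 * the valid union member, so the copy helpers below choose between
 * memcpy() and copy_{from,to}_user() accordingly.
 */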

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sockptr_is_kernel(sockptr_t sockptr)
{
return sockptr.is_kernel;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) sockptr_t KERNEL_SOCKPTR(void *p)
{
return (sockptr_t) { .kernel = p, .is_kernel = true };
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) sockptr_t USER_SOCKPTR(void *p)
{
return (sockptr_t) { .user = p };
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sockptr_is_null(sockptr_t sockptr)
{
if (sockptr_is_kernel(sockptr))
return !sockptr.kernel;
return !sockptr.user;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int copy_from_sockptr_offset(void *dst, sockptr_t src,
size_t offset, size_t size)
{
if (!sockptr_is_kernel(src))
return copy_from_user(dst, src.user + offset, size);
memcpy(dst, src.kernel + offset, size);
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int copy_from_sockptr(void *dst, sockptr_t src, size_t size)
{
return copy_from_sockptr_offset(dst, src, 0, size);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int copy_to_sockptr_offset(sockptr_t dst, size_t offset,
const void *src, size_t size)
{
if (!sockptr_is_kernel(dst))
return copy_to_user(dst.user + offset, src, size);
memcpy(dst.kernel + offset, src, size);
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *memdup_sockptr(sockptr_t src, size_t len)
{
void *p = __kmalloc_track_caller(len, ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u) | (( gfp_t)0x100000u)) | (( gfp_t)0x2000u), (unsigned long)__builtin_return_address(0));

if (!p)
return ERR_PTR(-12);
if (copy_from_sockptr(p, src, len)) {
kfree(p);
return ERR_PTR(-14);
}
return p;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *memdup_sockptr_nul(sockptr_t src, size_t len)
{
char *p = __kmalloc_track_caller(len + 1, ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u)), (unsigned long)__builtin_return_address(0));

if (!p)
return ERR_PTR(-12);
if (copy_from_sockptr(p, src, len)) {
kfree(p);
return ERR_PTR(-14);
}
p[len] = '\0';
return p;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long strncpy_from_sockptr(char *dst, sockptr_t src, size_t count)
{
if (sockptr_is_kernel(src)) {
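/*
 * The len computation below is min(strnlen(src.kernel, count - 1) + 1,
 * count) with the kernel's type-checked min() macro fully expanded.
 */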
size_t len = __builtin_choose_expr(((!!(sizeof((typeof(strnlen(src.kernel, count - 1) + 1) *)1 == (typeof(count) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(strnlen(src.kernel, count - 1) + 1) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(count) * 0l)) : (int *)8))))), ((strnlen(src.kernel, count - 1) + 1) < (count) ? (strnlen(src.kernel, count - 1) + 1) : (count)), ({ typeof(strnlen(src.kernel, count - 1) + 1) __UNIQUE_ID___x214 = (strnlen(src.kernel, count - 1) + 1); typeof(count) __UNIQUE_ID___y215 = (count); ((__UNIQUE_ID___x214) < (__UNIQUE_ID___y215) ? (__UNIQUE_ID___x214) : (__UNIQUE_ID___y215)); }));

memcpy(dst, src.kernel, len);
return len;
}
return strncpy_from_user(dst, src.user, count);
}
# 26 "./include/linux/net.h" 2

# 1 "./include/uapi/linux/net.h" 1
# 23 "./include/uapi/linux/net.h"
# 1 "./arch/riscv/include/generated/uapi/asm/socket.h" 1
# 24 "./include/uapi/linux/net.h" 2
# 48 "./include/uapi/linux/net.h"
typedef enum {
SS_FREE = 0,
SS_UNCONNECTED,
SS_CONNECTING,
SS_CONNECTED,
SS_DISCONNECTING
} socket_state;
# 28 "./include/linux/net.h" 2

struct poll_table_struct;
struct pipe_inode_info;
struct inode;
struct file;
struct net;
# 61 "./include/linux/net.h"
enum sock_type {
SOCK_STREAM = 1,
SOCK_DGRAM = 2,
SOCK_RAW = 3,
SOCK_RDM = 4,
SOCK_SEQPACKET = 5,
SOCK_DCCP = 6,
SOCK_PACKET = 10,
};
# 90 "./include/linux/net.h"
enum sock_shutdown_cmd {
SHUT_RD,
SHUT_WR,
SHUT_RDWR,
};

struct socket_wq {

wait_queue_head_t wait;
struct fasync_struct *fasync_list;
unsigned long flags;
struct callback_head rcu;
} __attribute__((__aligned__((1 << 6))));
# 114 "./include/linux/net.h"
struct socket {
socket_state state;

short type;

unsigned long flags;

struct file *file;
struct sock *sk;
const struct proto_ops *ops;

struct socket_wq wq;
};
# 137 "./include/linux/net.h"
typedef struct {
size_t written;
size_t count;
union {
char *buf;
void *data;
} arg;
int error;
} read_descriptor_t;

struct vm_area_struct;
struct page;
struct sockaddr;
struct msghdr;
struct module;
struct sk_buff;
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
unsigned int, size_t);

struct proto_ops {
int family;
struct module *owner;
int (*release) (struct socket *sock);
int (*bind) (struct socket *sock,
struct sockaddr *myaddr,
int sockaddr_len);
int (*connect) (struct socket *sock,
struct sockaddr *vaddr,
int sockaddr_len, int flags);
int (*socketpair)(struct socket *sock1,
struct socket *sock2);
int (*accept) (struct socket *sock,
struct socket *newsock, int flags, bool kern);
int (*getname) (struct socket *sock,
struct sockaddr *addr,
int peer);
__poll_t (*poll) (struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int (*ioctl) (struct socket *sock, unsigned int cmd,
unsigned long arg);




int (*gettstamp) (struct socket *sock, void *userstamp,
bool timeval, bool time32);
int (*listen) (struct socket *sock, int len);
int (*shutdown) (struct socket *sock, int flags);
int (*setsockopt)(struct socket *sock, int level,
int optname, sockptr_t optval,
unsigned int optlen);
int (*getsockopt)(struct socket *sock, int level,
int optname, char *optval, int *optlen);
void (*show_fdinfo)(struct seq_file *m, struct socket *sock);
int (*sendmsg) (struct socket *sock, struct msghdr *m,
size_t total_len);
# 201 "./include/linux/net.h"
int (*recvmsg) (struct socket *sock, struct msghdr *m,
size_t total_len, int flags);
int (*mmap) (struct file *file, struct socket *sock,
struct vm_area_struct * vma);
ssize_t (*sendpage) (struct socket *sock, struct page *page,
int offset, size_t size, int flags);
ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len, unsigned int flags);
int (*set_peek_off)(struct sock *sk, int val);
int (*peek_len)(struct socket *sock);




int (*read_sock)(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
int (*sendpage_locked)(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg,
size_t size);
int (*set_rcvlowat)(struct sock *sk, int val);
};




struct net_proto_family {
int family;
int (*create)(struct net *net, struct socket *sock,
int protocol, int kern);
struct module *owner;
};

struct iovec;
struct kvec;

enum {
SOCK_WAKE_IO,
SOCK_WAKE_WAITD,
SOCK_WAKE_SPACE,
SOCK_WAKE_URG,
};

int sock_wake_async(struct socket_wq *sk_wq, int how, int band);
int sock_register(const struct net_proto_family *fam);
void sock_unregister(int family);
bool sock_is_registered(int family);
int __sock_create(struct net *net, int family, int type, int proto,
struct socket **res, int kern);
int sock_create(int family, int type, int proto, struct socket **res);
int sock_create_kern(struct net *net, int family, int type, int proto, struct socket **res);
int sock_create_lite(int family, int type, int proto, struct socket **res);
struct socket *sock_alloc(void);
void sock_release(struct socket *sock);
int sock_sendmsg(struct socket *sock, struct msghdr *msg);
int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags);
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
struct socket *sockfd_lookup(int fd, int *err);
struct socket *sock_from_file(struct file *file);

int net_ratelimit(void);
# 319 "./include/linux/net.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sendpage_ok(struct page *page)
{
return !PageSlab(page) && page_count(page) >= 1;
}

int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len);
int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
struct kvec *vec, size_t num, size_t len);
int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len, int flags);

int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen);
int kernel_listen(struct socket *sock, int backlog);
int kernel_accept(struct socket *sock, struct socket **newsock, int flags);
int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
int flags);
int kernel_getsockname(struct socket *sock, struct sockaddr *addr);
int kernel_getpeername(struct socket *sock, struct sockaddr *addr);
int kernel_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags);
int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);


u32 kernel_sock_ip_overhead(struct sock *sk);
# 33 "net/ipv6/route.c" 2
# 1 "./include/uapi/linux/route.h" 1
# 27 "./include/uapi/linux/route.h"
# 1 "./include/uapi/linux/if.h" 1
# 23 "./include/uapi/linux/if.h"
# 1 "./include/uapi/linux/libc-compat.h" 1
# 24 "./include/uapi/linux/if.h" 2
# 37 "./include/uapi/linux/if.h"
# 1 "./include/uapi/linux/hdlc/ioctl.h" 1
# 40 "./include/uapi/linux/hdlc/ioctl.h"
typedef struct {
unsigned int clock_rate;
unsigned int clock_type;
unsigned short loopback;
} sync_serial_settings;

typedef struct {
unsigned int clock_rate;
unsigned int clock_type;
unsigned short loopback;
unsigned int slot_map;
} te1_settings;

typedef struct {
unsigned short encoding;
unsigned short parity;
} raw_hdlc_proto;

typedef struct {
unsigned int t391;
unsigned int t392;
unsigned int n391;
unsigned int n392;
unsigned int n393;
unsigned short lmi;
unsigned short dce;
} fr_proto;

typedef struct {
unsigned int dlci;
} fr_proto_pvc;

typedef struct {
unsigned int dlci;
char master[16];
} fr_proto_pvc_info;

typedef struct {
unsigned int interval;
unsigned int timeout;
} cisco_proto;

typedef struct {
unsigned short dce;
unsigned int modulo;
unsigned int window;
unsigned int t1;
unsigned int t2;
unsigned int n2;
} x25_hdlc_proto;
# 38 "./include/uapi/linux/if.h" 2
# 82 "./include/uapi/linux/if.h"
enum net_device_flags {


IFF_UP = 1<<0,
IFF_BROADCAST = 1<<1,
IFF_DEBUG = 1<<2,
IFF_LOOPBACK = 1<<3,
IFF_POINTOPOINT = 1<<4,
IFF_NOTRAILERS = 1<<5,
IFF_RUNNING = 1<<6,
IFF_NOARP = 1<<7,
IFF_PROMISC = 1<<8,
IFF_ALLMULTI = 1<<9,
IFF_MASTER = 1<<10,
IFF_SLAVE = 1<<11,
IFF_MULTICAST = 1<<12,
IFF_PORTSEL = 1<<13,
IFF_AUTOMEDIA = 1<<14,
IFF_DYNAMIC = 1<<15,


IFF_LOWER_UP = 1<<16,
IFF_DORMANT = 1<<17,
IFF_ECHO = 1<<18,

};
# 167 "./include/uapi/linux/if.h"
enum {
IF_OPER_UNKNOWN,
IF_OPER_NOTPRESENT,
IF_OPER_DOWN,
IF_OPER_LOWERLAYERDOWN,
IF_OPER_TESTING,
IF_OPER_DORMANT,
IF_OPER_UP,
};


enum {
IF_LINK_MODE_DEFAULT,
IF_LINK_MODE_DORMANT,
IF_LINK_MODE_TESTING,
};
# 196 "./include/uapi/linux/if.h"
struct ifmap {
unsigned long mem_start;
unsigned long mem_end;
unsigned short base_addr;
unsigned char irq;
unsigned char dma;
unsigned char port;

};


struct if_settings {
unsigned int type;
unsigned int size;
union {

raw_hdlc_proto *raw_hdlc;
cisco_proto *cisco;
fr_proto *fr;
fr_proto_pvc *fr_pvc;
fr_proto_pvc_info *fr_pvc_info;
x25_hdlc_proto *x25;


sync_serial_settings *sync;
te1_settings *te1;
} ifs_ifsu;
};
# 234 "./include/uapi/linux/if.h"
struct ifreq {

union
{
char ifrn_name[16];
} ifr_ifrn;

union {
struct sockaddr ifru_addr;
struct sockaddr ifru_dstaddr;
struct sockaddr ifru_broadaddr;
struct sockaddr ifru_netmask;
struct sockaddr ifru_hwaddr;
short ifru_flags;
int ifru_ivalue;
int ifru_mtu;
struct ifmap ifru_map;
char ifru_slave[16];
char ifru_newname[16];
void * ifru_data;
struct if_settings ifru_settings;
} ifr_ifru;
};
# 286 "./include/uapi/linux/if.h"
struct ifconf {
int ifc_len;
union {
char *ifcu_buf;
struct ifreq *ifcu_req;
} ifc_ifcu;
};
# 28 "./include/uapi/linux/route.h" 2



struct rtentry {
unsigned long rt_pad1;
struct sockaddr rt_dst;
struct sockaddr rt_gateway;
struct sockaddr rt_genmask;
unsigned short rt_flags;
short rt_pad2;
unsigned long rt_pad3;
void *rt_pad4;
short rt_metric;
char *rt_dev;
unsigned long rt_mtu;



unsigned long rt_window;
unsigned short rt_irtt;
};
# 34 "net/ipv6/route.c" 2
# 1 "./include/linux/netdevice.h" 1
# 26 "./include/linux/netdevice.h"
# 1 "./include/linux/delay.h" 1
# 25 "./include/linux/delay.h"
extern unsigned long loops_per_jiffy;


# 1 "./arch/riscv/include/asm/delay.h" 1
# 10 "./arch/riscv/include/asm/delay.h"
extern unsigned long riscv_timebase;


extern void udelay(unsigned long usecs);


extern void ndelay(unsigned long nsecs);

extern void __delay(unsigned long cycles);
# 28 "./include/linux/delay.h" 2
# 57 "./include/linux/delay.h"
extern unsigned long lpj_fine;
void calibrate_delay(void);
void __attribute__((weak)) calibration_delay_done(void);
void msleep(unsigned int msecs);
unsigned long msleep_interruptible(unsigned int msecs);
void usleep_range_state(unsigned long min, unsigned long max,
unsigned int state);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void usleep_range(unsigned long min, unsigned long max)
{
usleep_range_state(min, max, 0x0002);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void usleep_idle_range(unsigned long min, unsigned long max)
{
usleep_range_state(min, max, (0x0002 | 0x0400));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ssleep(unsigned int seconds)
{
msleep(seconds * 1000);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fsleep(unsigned long usecs)
{
if (usecs <= 10)
udelay(usecs);
else if (usecs <= 20000)
usleep_range(usecs, 2 * usecs);
else
msleep((((usecs) + (1000) - 1) / (1000)));
}
# 27 "./include/linux/netdevice.h" 2

# 1 "./include/linux/prefetch.h" 1
# 18 "./include/linux/prefetch.h"
struct page;
# 55 "./include/linux/prefetch.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void prefetch_range(void *addr, size_t len)
{







}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void prefetch_page_address(struct page *page)
{



}
# 29 "./include/linux/netdevice.h" 2


# 1 "./arch/riscv/include/generated/asm/local.h" 1
# 1 "./include/asm-generic/local.h" 1






# 1 "./arch/riscv/include/generated/uapi/asm/types.h" 1
# 8 "./include/asm-generic/local.h" 2
# 22 "./include/asm-generic/local.h"
typedef struct
{
atomic_long_t a;
} local_t;
# 2 "./arch/riscv/include/generated/asm/local.h" 2
# 32 "./include/linux/netdevice.h" 2




# 1 "./include/linux/dynamic_queue_limits.h" 1
# 43 "./include/linux/dynamic_queue_limits.h"
struct dql {

unsigned int num_queued;
unsigned int adj_limit;
unsigned int last_obj_cnt;



unsigned int limit __attribute__((__aligned__((1 << 6))));
unsigned int num_completed;

unsigned int prev_ovlimit;
unsigned int prev_num_queued;
unsigned int prev_last_obj_cnt;

unsigned int lowest_slack;
unsigned long slack_start_time;


unsigned int max_limit;
unsigned int min_limit;
unsigned int slack_hold_time;
};
# 75 "./include/linux/dynamic_queue_limits.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dql_queued(struct dql *dql, unsigned int count)
{
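/* BUG_ON(count > DQL_MAX_OBJECT), with BUG() expanded to ebreak + __bug_table. */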
do { if (__builtin_expect(!!(count > ((~0U) / 16)), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/dynamic_queue_limits.h"), "i" (77), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);

dql->last_obj_cnt = count;
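/*
 * The bare asm volatile below is barrier(): a compiler-only memory
 * clobber ordering the last_obj_cnt store before the num_queued update.
 */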






__asm__ __volatile__("": : :"memory");

dql->num_queued += count;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dql_avail(const struct dql *dql)
{
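/*
 * READ_ONCE(dql->adj_limit) - READ_ONCE(dql->num_queued); the
 * compiletime_assert/_Generic scaffolding is that macro expanded.
 */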
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_216(void) ; if (!((sizeof(dql->adj_limit) == sizeof(char) || sizeof(dql->adj_limit) == sizeof(short) || sizeof(dql->adj_limit) == sizeof(int) || sizeof(dql->adj_limit) == sizeof(long)) || sizeof(dql->adj_limit) == sizeof(long long))) __compiletime_assert_216(); } while (0); (*(const volatile typeof( _Generic((dql->adj_limit), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (dql->adj_limit))) *)&(dql->adj_limit)); }) - ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_217(void) ; if (!((sizeof(dql->num_queued) == sizeof(char) || sizeof(dql->num_queued) == sizeof(short) || sizeof(dql->num_queued) == sizeof(int) || sizeof(dql->num_queued) == sizeof(long)) || sizeof(dql->num_queued) == sizeof(long long))) __compiletime_assert_217(); } while (0); (*(const volatile typeof( _Generic((dql->num_queued), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (dql->num_queued))) *)&(dql->num_queued)); });
}


void dql_completed(struct dql *dql, unsigned int count);


void dql_reset(struct dql *dql);


void dql_init(struct dql *dql, unsigned int hold_time);
# 37 "./include/linux/netdevice.h" 2

# 1 "./include/net/net_namespace.h" 1
# 15 "./include/net/net_namespace.h"
# 1 "./include/net/flow.h" 1
# 12 "./include/net/flow.h"
# 1 "./include/linux/in6.h" 1
# 19 "./include/linux/in6.h"
# 1 "./include/uapi/linux/in6.h" 1
# 33 "./include/uapi/linux/in6.h"
struct in6_addr {
union {
__u8 u6_addr8[16];

__be16 u6_addr16[8];
__be32 u6_addr32[4];

} in6_u;





};



struct sockaddr_in6 {
unsigned short int sin6_family;
__be16 sin6_port;
__be32 sin6_flowinfo;
struct in6_addr sin6_addr;
__u32 sin6_scope_id;
};



struct ipv6_mreq {

struct in6_addr ipv6mr_multiaddr;


int ipv6mr_ifindex;
};




struct in6_flowlabel_req {
struct in6_addr flr_dst;
__be32 flr_label;
__u8 flr_action;
__u8 flr_share;
__u16 flr_flags;
__u16 flr_expires;
__u16 flr_linger;
__u32 __flr_pad;

};
# 20 "./include/linux/in6.h" 2





extern const struct in6_addr in6addr_any;

extern const struct in6_addr in6addr_loopback;

extern const struct in6_addr in6addr_linklocal_allnodes;


extern const struct in6_addr in6addr_linklocal_allrouters;


extern const struct in6_addr in6addr_interfacelocal_allnodes;


extern const struct in6_addr in6addr_interfacelocal_allrouters;


extern const struct in6_addr in6addr_sitelocal_allrouters;
# 13 "./include/net/flow.h" 2

# 1 "./include/net/flow_dissector.h" 1






# 1 "./include/linux/siphash.h" 1
# 20 "./include/linux/siphash.h"
typedef struct {
u64 key[2];
} siphash_key_t;



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool siphash_key_is_zero(const siphash_key_t *key)
{
return !(key->key[0] | key->key[1]);
}

u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);

u64 siphash_1u64(const u64 a, const siphash_key_t *key);
u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
u64 siphash_3u64(const u64 a, const u64 b, const u64 c,
const siphash_key_t *key);
u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d,
const siphash_key_t *key);
u64 siphash_1u32(const u32 a, const siphash_key_t *key);
u64 siphash_3u32(const u32 a, const u32 b, const u32 c,
const siphash_key_t *key);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 siphash_2u32(const u32 a, const u32 b,
const siphash_key_t *key)
{
return siphash_1u64((u64)b << 32 | a, key);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 siphash_4u32(const u32 a, const u32 b, const u32 c,
const u32 d, const siphash_key_t *key)
{
return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 ___siphash_aligned(const __le64 *data, size_t len,
const siphash_key_t *key)
{
if (__builtin_constant_p(len) && len == 4)
return siphash_1u32(__le32_to_cpup((const __le32 *)data), key);
if (__builtin_constant_p(len) && len == 8)
return siphash_1u64((( __u64)(__le64)(data[0])), key);
if (__builtin_constant_p(len) && len == 16)
return siphash_2u64((( __u64)(__le64)(data[0])), (( __u64)(__le64)(data[1])),
key);
if (__builtin_constant_p(len) && len == 24)
return siphash_3u64((( __u64)(__le64)(data[0])), (( __u64)(__le64)(data[1])),
(( __u64)(__le64)(data[2])), key);
if (__builtin_constant_p(len) && len == 32)
return siphash_4u64((( __u64)(__le64)(data[0])), (( __u64)(__le64)(data[1])),
(( __u64)(__le64)(data[2])), (( __u64)(__le64)(data[3])),
key);
return __siphash_aligned(data, len, key);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 siphash(const void *data, size_t len,
const siphash_key_t *key)
{
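/*
 * The leading 0 is IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS);
 * with that off, dispatch is purely on data alignment: unaligned input
 * takes __siphash_unaligned(), aligned input the u64 fast path above.
 */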
if (0 ||
!((((unsigned long)data) & ((typeof((unsigned long)data))(__alignof__(u64)) - 1)) == 0))
return __siphash_unaligned(data, len, key);
return ___siphash_aligned(data, len, key);
}


typedef struct {
unsigned long key[2];
} hsiphash_key_t;

u32 __hsiphash_aligned(const void *data, size_t len,
const hsiphash_key_t *key);
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key);

u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c,
const hsiphash_key_t *key);
u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d,
const hsiphash_key_t *key);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 ___hsiphash_aligned(const __le32 *data, size_t len,
const hsiphash_key_t *key)
{
if (__builtin_constant_p(len) && len == 4)
return hsiphash_1u32((( __u32)(__le32)(data[0])), key);
if (__builtin_constant_p(len) && len == 8)
return hsiphash_2u32((( __u32)(__le32)(data[0])), (( __u32)(__le32)(data[1])),
key);
if (__builtin_constant_p(len) && len == 12)
return hsiphash_3u32((( __u32)(__le32)(data[0])), (( __u32)(__le32)(data[1])),
(( __u32)(__le32)(data[2])), key);
if (__builtin_constant_p(len) && len == 16)
return hsiphash_4u32((( __u32)(__le32)(data[0])), (( __u32)(__le32)(data[1])),
(( __u32)(__le32)(data[2])), (( __u32)(__le32)(data[3])),
key);
return __hsiphash_aligned(data, len, key);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 hsiphash(const void *data, size_t len,
const hsiphash_key_t *key)
{
if (0 ||
!((((unsigned long)data) & ((typeof((unsigned long)data))(__alignof__(unsigned long)) - 1)) == 0))
return __hsiphash_unaligned(data, len, key);
return ___hsiphash_aligned(data, len, key);
}
# 8 "./include/net/flow_dissector.h" 2

# 1 "./include/uapi/linux/if_ether.h" 1
# 171 "./include/uapi/linux/if_ether.h"
struct ethhdr {
unsigned char h_dest[6];
unsigned char h_source[6];
__be16 h_proto;
} __attribute__((packed));
# 10 "./include/net/flow_dissector.h" 2

struct bpf_prog;
struct net;
struct sk_buff;





struct flow_dissector_key_control {
u16 thoff;
u16 addr_type;
u32 flags;
};





enum flow_dissect_ret {
FLOW_DISSECT_RET_OUT_GOOD,
FLOW_DISSECT_RET_OUT_BAD,
FLOW_DISSECT_RET_PROTO_AGAIN,
FLOW_DISSECT_RET_IPPROTO_AGAIN,
FLOW_DISSECT_RET_CONTINUE,
};






struct flow_dissector_key_basic {
__be16 n_proto;
u8 ip_proto;
u8 padding;
};

struct flow_dissector_key_tags {
u32 flow_label;
};

struct flow_dissector_key_vlan {
union {
struct {
u16 vlan_id:12,
vlan_dei:1,
vlan_priority:3;
};
__be16 vlan_tci;
};
__be16 vlan_tpid;
__be16 vlan_eth_type;
u16 padding;
};

struct flow_dissector_mpls_lse {
u32 mpls_ttl:8,
mpls_bos:1,
mpls_tc:3,
mpls_label:20;
};


struct flow_dissector_key_mpls {
struct flow_dissector_mpls_lse ls[7];
u8 used_lses;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dissector_set_mpls_lse(struct flow_dissector_key_mpls *mpls,
int lse_index)
{
mpls->used_lses |= 1 << lse_index;
}
# 92 "./include/net/flow_dissector.h"
struct flow_dissector_key_enc_opts {
u8 data[255];


u8 len;
__be16 dst_opt_type;
};

struct flow_dissector_key_keyid {
__be32 keyid;
};






struct flow_dissector_key_ipv4_addrs {

__be32 src;
__be32 dst;
};






struct flow_dissector_key_ipv6_addrs {

struct in6_addr src;
struct in6_addr dst;
};





struct flow_dissector_key_tipc {
__be32 key;
};






struct flow_dissector_key_addrs {
union {
struct flow_dissector_key_ipv4_addrs v4addrs;
struct flow_dissector_key_ipv6_addrs v6addrs;
struct flow_dissector_key_tipc tipckey;
};
};
# 157 "./include/net/flow_dissector.h"
struct flow_dissector_key_arp {
__u32 sip;
__u32 tip;
__u8 op;
unsigned char sha[6];
unsigned char tha[6];
};







struct flow_dissector_key_ports {
union {
__be32 ports;
struct {
__be16 src;
__be16 dst;
};
};
};







struct flow_dissector_key_icmp {
struct {
u8 type;
u8 code;
};
u16 id;
};






struct flow_dissector_key_eth_addrs {

unsigned char dst[6];
unsigned char src[6];
};





struct flow_dissector_key_tcp {
__be16 flags;
};






struct flow_dissector_key_ip {
__u8 tos;
__u8 ttl;
};






struct flow_dissector_key_meta {
int ingress_ifindex;
u16 ingress_iftype;
};
# 241 "./include/net/flow_dissector.h"
struct flow_dissector_key_ct {
u16 ct_state;
u16 ct_zone;
u32 ct_mark;
u32 ct_labels[4];
};





struct flow_dissector_key_hash {
u32 hash;
};

enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_CONTROL,
FLOW_DISSECTOR_KEY_BASIC,
FLOW_DISSECTOR_KEY_IPV4_ADDRS,
FLOW_DISSECTOR_KEY_IPV6_ADDRS,
FLOW_DISSECTOR_KEY_PORTS,
FLOW_DISSECTOR_KEY_PORTS_RANGE,
FLOW_DISSECTOR_KEY_ICMP,
FLOW_DISSECTOR_KEY_ETH_ADDRS,
FLOW_DISSECTOR_KEY_TIPC,
FLOW_DISSECTOR_KEY_ARP,
FLOW_DISSECTOR_KEY_VLAN,
FLOW_DISSECTOR_KEY_FLOW_LABEL,
FLOW_DISSECTOR_KEY_GRE_KEYID,
FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
FLOW_DISSECTOR_KEY_ENC_KEYID,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
FLOW_DISSECTOR_KEY_ENC_CONTROL,
FLOW_DISSECTOR_KEY_ENC_PORTS,
FLOW_DISSECTOR_KEY_MPLS,
FLOW_DISSECTOR_KEY_TCP,
FLOW_DISSECTOR_KEY_IP,
FLOW_DISSECTOR_KEY_CVLAN,
FLOW_DISSECTOR_KEY_ENC_IP,
FLOW_DISSECTOR_KEY_ENC_OPTS,
FLOW_DISSECTOR_KEY_META,
FLOW_DISSECTOR_KEY_CT,
FLOW_DISSECTOR_KEY_HASH,

FLOW_DISSECTOR_KEY_MAX,
};






struct flow_dissector_key {
enum flow_dissector_key_id key_id;
size_t offset;

};

struct flow_dissector {
unsigned int used_keys;
unsigned short int offset[FLOW_DISSECTOR_KEY_MAX];
};

struct flow_keys_basic {
struct flow_dissector_key_control control;
struct flow_dissector_key_basic basic;
};

struct flow_keys {
struct flow_dissector_key_control control;

struct flow_dissector_key_basic basic __attribute__((__aligned__(__alignof__(u64))));
struct flow_dissector_key_tags tags;
struct flow_dissector_key_vlan vlan;
struct flow_dissector_key_vlan cvlan;
struct flow_dissector_key_keyid keyid;
struct flow_dissector_key_ports ports;
struct flow_dissector_key_icmp icmp;

struct flow_dissector_key_addrs addrs;
};




__be32 flow_get_u32_src(const struct flow_keys *flow);
__be32 flow_get_u32_dst(const struct flow_keys *flow);

extern struct flow_dissector flow_keys_dissector;
extern struct flow_dissector flow_keys_basic_dissector;
# 341 "./include/net/flow_dissector.h"
struct flow_keys_digest {
u8 data[16];
};

void make_flow_keys_digest(struct flow_keys_digest *digest,
const struct flow_keys *flow);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool flow_keys_have_l4(const struct flow_keys *keys)
{
return (keys->ports.ports || keys->tags.flow_label);
}

u32 flow_hash_from_keys(struct flow_keys *keys);
void skb_flow_get_icmp_tci(const struct sk_buff *skb,
struct flow_dissector_key_icmp *key_icmp,
const void *data, int thoff, int hlen);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dissector_uses_key(const struct flow_dissector *flow_dissector,
enum flow_dissector_key_id key_id)
{
return flow_dissector->used_keys & (1 << key_id);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *skb_flow_dissector_target(struct flow_dissector *flow_dissector,
enum flow_dissector_key_id key_id,
void *target_container)
{
return ((char *)target_container) + flow_dissector->offset[key_id];
}
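/*
 * Hedged example tying the two helpers above together (names below are
 * hypothetical): a dissector advertises a key via a bit in used_keys,
 * and the per-key offset locates that key inside the caller's target
 * container.
 */
static inline __attribute__((__unused__)) void dissector_target_example(struct flow_dissector *d, void *container)
{
if (dissector_uses_key(d, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_dissector_key_basic *basic =
skb_flow_dissector_target(d, FLOW_DISSECTOR_KEY_BASIC, container);
basic->ip_proto = 0;
}
}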

struct bpf_flow_dissector {
struct bpf_flow_keys *flow_keys;
const struct sk_buff *skb;
const void *data;
const void *data_end;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
flow_dissector_init_keys(struct flow_dissector_key_control *key_control,
struct flow_dissector_key_basic *key_basic)
{
memset(key_control, 0, sizeof(*key_control));
memset(key_basic, 0, sizeof(*key_basic));
}


int flow_dissector_bpf_prog_attach_check(struct net *net,
struct bpf_prog *prog);
# 15 "./include/net/flow.h" 2
# 25 "./include/net/flow.h"
struct flowi_tunnel {
__be64 tun_id;
};

struct flowi_common {
int flowic_oif;
int flowic_iif;
int flowic_l3mdev;
__u32 flowic_mark;
__u8 flowic_tos;
__u8 flowic_scope;
__u8 flowic_proto;
__u8 flowic_flags;


__u32 flowic_secid;
kuid_t flowic_uid;
struct flowi_tunnel flowic_tun_key;
__u32 flowic_multipath_hash;
};

union flowi_uli {
struct {
__be16 dport;
__be16 sport;
} ports;

struct {
__u8 type;
__u8 code;
} icmpt;

struct {
__le16 dport;
__le16 sport;
} dnports;

__be32 gre_key;

struct {
__u8 type;
} mht;
};

struct flowi4 {
struct flowi_common __fl_common;
# 85 "./include/net/flow.h"
__be32 saddr;
__be32 daddr;

union flowi_uli uli;






} __attribute__((__aligned__(64/8)));

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flowi4_init_output(struct flowi4 *fl4, int oif,
__u32 mark, __u8 tos, __u8 scope,
__u8 proto, __u8 flags,
__be32 daddr, __be32 saddr,
__be16 dport, __be16 sport,
kuid_t uid)
{
fl4->__fl_common.flowic_oif = oif;
fl4->__fl_common.flowic_iif = 1;
fl4->__fl_common.flowic_l3mdev = 0;
fl4->__fl_common.flowic_mark = mark;
fl4->__fl_common.flowic_tos = tos;
fl4->__fl_common.flowic_scope = scope;
fl4->__fl_common.flowic_proto = proto;
fl4->__fl_common.flowic_flags = flags;
fl4->__fl_common.flowic_secid = 0;
fl4->__fl_common.flowic_tun_key.tun_id = 0;
fl4->__fl_common.flowic_uid = uid;
fl4->daddr = daddr;
fl4->saddr = saddr;
fl4->uli.ports.dport = dport;
fl4->uli.ports.sport = sport;
fl4->__fl_common.flowic_multipath_hash = 0;
}
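/*
 * Minimal usage sketch for flowi4_init_output(); every value below is
 * hypothetical. The point is that the helper fills all common fields,
 * so no stale state leaks into a subsequent route lookup. 6 stands in
 * for IPPROTO_TCP, which is only defined further down in this file.
 */
static inline __attribute__((__unused__)) void flowi4_example(struct flowi4 *fl4, kuid_t uid)
{
flowi4_init_output(fl4, 0 /* oif */, 0 /* mark */, 0 /* tos */,
0 /* scope */, 6 /* proto: TCP */, 0 /* flags */,
0 /* daddr */, 0 /* saddr */,
0 /* dport */, 0 /* sport */, uid);
}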


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flowi4_update_output(struct flowi4 *fl4, int oif, __u8 tos,
__be32 daddr, __be32 saddr)
{
fl4->__fl_common.flowic_oif = oif;
fl4->__fl_common.flowic_tos = tos;
fl4->daddr = daddr;
fl4->saddr = saddr;
}


struct flowi6 {
struct flowi_common __fl_common;
# 145 "./include/net/flow.h"
struct in6_addr daddr;
struct in6_addr saddr;

__be32 flowlabel;
union flowi_uli uli;






__u32 mp_hash;
} __attribute__((__aligned__(64/8)));

struct flowidn {
struct flowi_common __fl_common;






__le16 daddr;
__le16 saddr;
union flowi_uli uli;


} __attribute__((__aligned__(64/8)));

struct flowi {
union {
struct flowi_common __fl_common;
struct flowi4 ip4;
struct flowi6 ip6;
struct flowidn dn;
} u;
# 192 "./include/net/flow.h"
} __attribute__((__aligned__(64/8)));

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
{
return ({ void *__mptr = (void *)(fl4); _Static_assert(__builtin_types_compatible_p(typeof(*(fl4)), typeof(((struct flowi *)0)->u.ip4)) || __builtin_types_compatible_p(typeof(*(fl4)), typeof(void)), "pointer type mismatch in container_of()"); ((struct flowi *)(__mptr - __builtin_offsetof(struct flowi, u.ip4))); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct flowi_common *flowi4_to_flowi_common(struct flowi4 *fl4)
{
return &(fl4->__fl_common);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct flowi *flowi6_to_flowi(struct flowi6 *fl6)
{
return ({ void *__mptr = (void *)(fl6); _Static_assert(__builtin_types_compatible_p(typeof(*(fl6)), typeof(((struct flowi *)0)->u.ip6)) || __builtin_types_compatible_p(typeof(*(fl6)), typeof(void)), "pointer type mismatch in container_of()"); ((struct flowi *)(__mptr - __builtin_offsetof(struct flowi, u.ip6))); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct flowi_common *flowi6_to_flowi_common(struct flowi6 *fl6)
{
return &(fl6->__fl_common);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct flowi *flowidn_to_flowi(struct flowidn *fldn)
{
return ({ void *__mptr = (void *)(fldn); _Static_assert(__builtin_types_compatible_p(typeof(*(fldn)), typeof(((struct flowi *)0)->u.dn)) || __builtin_types_compatible_p(typeof(*(fldn)), typeof(void)), "pointer type mismatch in container_of()"); ((struct flowi *)(__mptr - __builtin_offsetof(struct flowi, u.dn))); });
}
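/*
 * Sketch (hypothetical names): flowi4_to_flowi() above is just
 * container_of() spelled out, so converting &fl->u.ip4 back recovers
 * the enclosing struct flowi.
 */
static inline __attribute__((__unused__)) int flowi_roundtrip_example(struct flowi *fl)
{
struct flowi4 *fl4 = &fl->u.ip4;

/* true by construction: the union member lives inside struct flowi */
return flowi4_to_flowi(fl4) == fl;
}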

__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys);
# 16 "./include/net/net_namespace.h" 2
# 1 "./include/net/netns/core.h" 1




struct ctl_table_header;
struct prot_inuse;

struct netns_core {

struct ctl_table_header *sysctl_hdr;

int sysctl_somaxconn;
u8 sysctl_txrehash;


struct prot_inuse *prot_inuse;

};
# 17 "./include/net/net_namespace.h" 2
# 1 "./include/net/netns/mib.h" 1




# 1 "./include/net/snmp.h" 1
# 18 "./include/net/snmp.h"
# 1 "./include/uapi/linux/snmp.h" 1
# 19 "./include/uapi/linux/snmp.h"
enum
{
IPSTATS_MIB_NUM = 0,

IPSTATS_MIB_INPKTS,
IPSTATS_MIB_INOCTETS,
IPSTATS_MIB_INDELIVERS,
IPSTATS_MIB_OUTFORWDATAGRAMS,
IPSTATS_MIB_OUTPKTS,
IPSTATS_MIB_OUTOCTETS,

IPSTATS_MIB_INHDRERRORS,
IPSTATS_MIB_INTOOBIGERRORS,
IPSTATS_MIB_INNOROUTES,
IPSTATS_MIB_INADDRERRORS,
IPSTATS_MIB_INUNKNOWNPROTOS,
IPSTATS_MIB_INTRUNCATEDPKTS,
IPSTATS_MIB_INDISCARDS,
IPSTATS_MIB_OUTDISCARDS,
IPSTATS_MIB_OUTNOROUTES,
IPSTATS_MIB_REASMTIMEOUT,
IPSTATS_MIB_REASMREQDS,
IPSTATS_MIB_REASMOKS,
IPSTATS_MIB_REASMFAILS,
IPSTATS_MIB_FRAGOKS,
IPSTATS_MIB_FRAGFAILS,
IPSTATS_MIB_FRAGCREATES,
IPSTATS_MIB_INMCASTPKTS,
IPSTATS_MIB_OUTMCASTPKTS,
IPSTATS_MIB_INBCASTPKTS,
IPSTATS_MIB_OUTBCASTPKTS,
IPSTATS_MIB_INMCASTOCTETS,
IPSTATS_MIB_OUTMCASTOCTETS,
IPSTATS_MIB_INBCASTOCTETS,
IPSTATS_MIB_OUTBCASTOCTETS,
IPSTATS_MIB_CSUMERRORS,
IPSTATS_MIB_NOECTPKTS,
IPSTATS_MIB_ECT1PKTS,
IPSTATS_MIB_ECT0PKTS,
IPSTATS_MIB_CEPKTS,
IPSTATS_MIB_REASM_OVERLAPS,
__IPSTATS_MIB_MAX
};






enum
{
ICMP_MIB_NUM = 0,
ICMP_MIB_INMSGS,
ICMP_MIB_INERRORS,
ICMP_MIB_INDESTUNREACHS,
ICMP_MIB_INTIMEEXCDS,
ICMP_MIB_INPARMPROBS,
ICMP_MIB_INSRCQUENCHS,
ICMP_MIB_INREDIRECTS,
ICMP_MIB_INECHOS,
ICMP_MIB_INECHOREPS,
ICMP_MIB_INTIMESTAMPS,
ICMP_MIB_INTIMESTAMPREPS,
ICMP_MIB_INADDRMASKS,
ICMP_MIB_INADDRMASKREPS,
ICMP_MIB_OUTMSGS,
ICMP_MIB_OUTERRORS,
ICMP_MIB_OUTDESTUNREACHS,
ICMP_MIB_OUTTIMEEXCDS,
ICMP_MIB_OUTPARMPROBS,
ICMP_MIB_OUTSRCQUENCHS,
ICMP_MIB_OUTREDIRECTS,
ICMP_MIB_OUTECHOS,
ICMP_MIB_OUTECHOREPS,
ICMP_MIB_OUTTIMESTAMPS,
ICMP_MIB_OUTTIMESTAMPREPS,
ICMP_MIB_OUTADDRMASKS,
ICMP_MIB_OUTADDRMASKREPS,
ICMP_MIB_CSUMERRORS,
__ICMP_MIB_MAX
};







enum
{
ICMP6_MIB_NUM = 0,
ICMP6_MIB_INMSGS,
ICMP6_MIB_INERRORS,
ICMP6_MIB_OUTMSGS,
ICMP6_MIB_OUTERRORS,
ICMP6_MIB_CSUMERRORS,
__ICMP6_MIB_MAX
};
# 125 "./include/uapi/linux/snmp.h"
enum
{
TCP_MIB_NUM = 0,
TCP_MIB_RTOALGORITHM,
TCP_MIB_RTOMIN,
TCP_MIB_RTOMAX,
TCP_MIB_MAXCONN,
TCP_MIB_ACTIVEOPENS,
TCP_MIB_PASSIVEOPENS,
TCP_MIB_ATTEMPTFAILS,
TCP_MIB_ESTABRESETS,
TCP_MIB_CURRESTAB,
TCP_MIB_INSEGS,
TCP_MIB_OUTSEGS,
TCP_MIB_RETRANSSEGS,
TCP_MIB_INERRS,
TCP_MIB_OUTRSTS,
TCP_MIB_CSUMERRORS,
__TCP_MIB_MAX
};






enum
{
UDP_MIB_NUM = 0,
UDP_MIB_INDATAGRAMS,
UDP_MIB_NOPORTS,
UDP_MIB_INERRORS,
UDP_MIB_OUTDATAGRAMS,
UDP_MIB_RCVBUFERRORS,
UDP_MIB_SNDBUFERRORS,
UDP_MIB_CSUMERRORS,
UDP_MIB_IGNOREDMULTI,
UDP_MIB_MEMERRORS,
__UDP_MIB_MAX
};


enum
{
LINUX_MIB_NUM = 0,
LINUX_MIB_SYNCOOKIESSENT,
LINUX_MIB_SYNCOOKIESRECV,
LINUX_MIB_SYNCOOKIESFAILED,
LINUX_MIB_EMBRYONICRSTS,
LINUX_MIB_PRUNECALLED,
LINUX_MIB_RCVPRUNED,
LINUX_MIB_OFOPRUNED,
LINUX_MIB_OUTOFWINDOWICMPS,
LINUX_MIB_LOCKDROPPEDICMPS,
LINUX_MIB_ARPFILTER,
LINUX_MIB_TIMEWAITED,
LINUX_MIB_TIMEWAITRECYCLED,
LINUX_MIB_TIMEWAITKILLED,
LINUX_MIB_PAWSACTIVEREJECTED,
LINUX_MIB_PAWSESTABREJECTED,
LINUX_MIB_DELAYEDACKS,
LINUX_MIB_DELAYEDACKLOCKED,
LINUX_MIB_DELAYEDACKLOST,
LINUX_MIB_LISTENOVERFLOWS,
LINUX_MIB_LISTENDROPS,
LINUX_MIB_TCPHPHITS,
LINUX_MIB_TCPPUREACKS,
LINUX_MIB_TCPHPACKS,
LINUX_MIB_TCPRENORECOVERY,
LINUX_MIB_TCPSACKRECOVERY,
LINUX_MIB_TCPSACKRENEGING,
LINUX_MIB_TCPSACKREORDER,
LINUX_MIB_TCPRENOREORDER,
LINUX_MIB_TCPTSREORDER,
LINUX_MIB_TCPFULLUNDO,
LINUX_MIB_TCPPARTIALUNDO,
LINUX_MIB_TCPDSACKUNDO,
LINUX_MIB_TCPLOSSUNDO,
LINUX_MIB_TCPLOSTRETRANSMIT,
LINUX_MIB_TCPRENOFAILURES,
LINUX_MIB_TCPSACKFAILURES,
LINUX_MIB_TCPLOSSFAILURES,
LINUX_MIB_TCPFASTRETRANS,
LINUX_MIB_TCPSLOWSTARTRETRANS,
LINUX_MIB_TCPTIMEOUTS,
LINUX_MIB_TCPLOSSPROBES,
LINUX_MIB_TCPLOSSPROBERECOVERY,
LINUX_MIB_TCPRENORECOVERYFAIL,
LINUX_MIB_TCPSACKRECOVERYFAIL,
LINUX_MIB_TCPRCVCOLLAPSED,
LINUX_MIB_TCPDSACKOLDSENT,
LINUX_MIB_TCPDSACKOFOSENT,
LINUX_MIB_TCPDSACKRECV,
LINUX_MIB_TCPDSACKOFORECV,
LINUX_MIB_TCPABORTONDATA,
LINUX_MIB_TCPABORTONCLOSE,
LINUX_MIB_TCPABORTONMEMORY,
LINUX_MIB_TCPABORTONTIMEOUT,
LINUX_MIB_TCPABORTONLINGER,
LINUX_MIB_TCPABORTFAILED,
LINUX_MIB_TCPMEMORYPRESSURES,
LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
LINUX_MIB_TCPSACKDISCARD,
LINUX_MIB_TCPDSACKIGNOREDOLD,
LINUX_MIB_TCPDSACKIGNOREDNOUNDO,
LINUX_MIB_TCPSPURIOUSRTOS,
LINUX_MIB_TCPMD5NOTFOUND,
LINUX_MIB_TCPMD5UNEXPECTED,
LINUX_MIB_TCPMD5FAILURE,
LINUX_MIB_SACKSHIFTED,
LINUX_MIB_SACKMERGED,
LINUX_MIB_SACKSHIFTFALLBACK,
LINUX_MIB_TCPBACKLOGDROP,
LINUX_MIB_PFMEMALLOCDROP,
LINUX_MIB_TCPMINTTLDROP,
LINUX_MIB_TCPDEFERACCEPTDROP,
LINUX_MIB_IPRPFILTER,
LINUX_MIB_TCPTIMEWAITOVERFLOW,
LINUX_MIB_TCPREQQFULLDOCOOKIES,
LINUX_MIB_TCPREQQFULLDROP,
LINUX_MIB_TCPRETRANSFAIL,
LINUX_MIB_TCPRCVCOALESCE,
LINUX_MIB_TCPBACKLOGCOALESCE,
LINUX_MIB_TCPOFOQUEUE,
LINUX_MIB_TCPOFODROP,
LINUX_MIB_TCPOFOMERGE,
LINUX_MIB_TCPCHALLENGEACK,
LINUX_MIB_TCPSYNCHALLENGE,
LINUX_MIB_TCPFASTOPENACTIVE,
LINUX_MIB_TCPFASTOPENACTIVEFAIL,
LINUX_MIB_TCPFASTOPENPASSIVE,
LINUX_MIB_TCPFASTOPENPASSIVEFAIL,
LINUX_MIB_TCPFASTOPENLISTENOVERFLOW,
LINUX_MIB_TCPFASTOPENCOOKIEREQD,
LINUX_MIB_TCPFASTOPENBLACKHOLE,
LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES,
LINUX_MIB_BUSYPOLLRXPACKETS,
LINUX_MIB_TCPAUTOCORKING,
LINUX_MIB_TCPFROMZEROWINDOWADV,
LINUX_MIB_TCPTOZEROWINDOWADV,
LINUX_MIB_TCPWANTZEROWINDOWADV,
LINUX_MIB_TCPSYNRETRANS,
LINUX_MIB_TCPORIGDATASENT,
LINUX_MIB_TCPHYSTARTTRAINDETECT,
LINUX_MIB_TCPHYSTARTTRAINCWND,
LINUX_MIB_TCPHYSTARTDELAYDETECT,
LINUX_MIB_TCPHYSTARTDELAYCWND,
LINUX_MIB_TCPACKSKIPPEDSYNRECV,
LINUX_MIB_TCPACKSKIPPEDPAWS,
LINUX_MIB_TCPACKSKIPPEDSEQ,
LINUX_MIB_TCPACKSKIPPEDFINWAIT2,
LINUX_MIB_TCPACKSKIPPEDTIMEWAIT,
LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
LINUX_MIB_TCPWINPROBE,
LINUX_MIB_TCPKEEPALIVE,
LINUX_MIB_TCPMTUPFAIL,
LINUX_MIB_TCPMTUPSUCCESS,
LINUX_MIB_TCPDELIVERED,
LINUX_MIB_TCPDELIVEREDCE,
LINUX_MIB_TCPACKCOMPRESSED,
LINUX_MIB_TCPZEROWINDOWDROP,
LINUX_MIB_TCPRCVQDROP,
LINUX_MIB_TCPWQUEUETOOBIG,
LINUX_MIB_TCPFASTOPENPASSIVEALTKEY,
LINUX_MIB_TCPTIMEOUTREHASH,
LINUX_MIB_TCPDUPLICATEDATAREHASH,
LINUX_MIB_TCPDSACKRECVSEGS,
LINUX_MIB_TCPDSACKIGNOREDDUBIOUS,
LINUX_MIB_TCPMIGRATEREQSUCCESS,
LINUX_MIB_TCPMIGRATEREQFAILURE,
__LINUX_MIB_MAX
};


enum
{
LINUX_MIB_XFRMNUM = 0,
LINUX_MIB_XFRMINERROR,
LINUX_MIB_XFRMINBUFFERERROR,
LINUX_MIB_XFRMINHDRERROR,
LINUX_MIB_XFRMINNOSTATES,
LINUX_MIB_XFRMINSTATEPROTOERROR,
LINUX_MIB_XFRMINSTATEMODEERROR,
LINUX_MIB_XFRMINSTATESEQERROR,
LINUX_MIB_XFRMINSTATEEXPIRED,
LINUX_MIB_XFRMINSTATEMISMATCH,
LINUX_MIB_XFRMINSTATEINVALID,
LINUX_MIB_XFRMINTMPLMISMATCH,
LINUX_MIB_XFRMINNOPOLS,
LINUX_MIB_XFRMINPOLBLOCK,
LINUX_MIB_XFRMINPOLERROR,
LINUX_MIB_XFRMOUTERROR,
LINUX_MIB_XFRMOUTBUNDLEGENERROR,
LINUX_MIB_XFRMOUTBUNDLECHECKERROR,
LINUX_MIB_XFRMOUTNOSTATES,
LINUX_MIB_XFRMOUTSTATEPROTOERROR,
LINUX_MIB_XFRMOUTSTATEMODEERROR,
LINUX_MIB_XFRMOUTSTATESEQERROR,
LINUX_MIB_XFRMOUTSTATEEXPIRED,
LINUX_MIB_XFRMOUTPOLBLOCK,
LINUX_MIB_XFRMOUTPOLDEAD,
LINUX_MIB_XFRMOUTPOLERROR,
LINUX_MIB_XFRMFWDHDRERROR,
LINUX_MIB_XFRMOUTSTATEINVALID,
LINUX_MIB_XFRMACQUIREERROR,
__LINUX_MIB_XFRMMAX
};


enum
{
LINUX_MIB_TLSNUM = 0,
LINUX_MIB_TLSCURRTXSW,
LINUX_MIB_TLSCURRRXSW,
LINUX_MIB_TLSCURRTXDEVICE,
LINUX_MIB_TLSCURRRXDEVICE,
LINUX_MIB_TLSTXSW,
LINUX_MIB_TLSRXSW,
LINUX_MIB_TLSTXDEVICE,
LINUX_MIB_TLSRXDEVICE,
LINUX_MIB_TLSDECRYPTERROR,
LINUX_MIB_TLSRXDEVICERESYNC,
__LINUX_MIB_TLSMAX
};
# 19 "./include/net/snmp.h" 2
# 29 "./include/net/snmp.h"
struct snmp_mib {
const char *name;
int entry;
};
# 47 "./include/net/snmp.h"
# 1 "./include/linux/u64_stats_sync.h" 1
# 68 "./include/linux/u64_stats_sync.h"
struct u64_stats_sync {



};



# 1 "./arch/riscv/include/generated/asm/local64.h" 1
# 1 "./include/asm-generic/local64.h" 1





# 1 "./arch/riscv/include/generated/uapi/asm/types.h" 1
# 7 "./include/asm-generic/local64.h" 2
# 22 "./include/asm-generic/local64.h"
# 1 "./arch/riscv/include/generated/asm/local.h" 1
# 23 "./include/asm-generic/local64.h" 2

typedef struct {
local_t a;
} local64_t;
# 2 "./arch/riscv/include/generated/asm/local64.h" 2
# 76 "./include/linux/u64_stats_sync.h" 2

typedef struct {
local64_t v;
} u64_stats_t;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 u64_stats_read(const u64_stats_t *p)
{
return atomic_long_read(&(&(&p->v)->a)->a);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void u64_stats_set(u64_stats_t *p, u64 val)
{
atomic_long_set((&((&(&p->v)->a))->a),((val)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void u64_stats_add(u64_stats_t *p, unsigned long val)
{
atomic_long_add(((val)),(&((&(&p->v)->a))->a));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void u64_stats_inc(u64_stats_t *p)
{
atomic_long_inc(&(&(&p->v)->a)->a);
}
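/*
 * Sketch of the 64-bit fast path above (this is an rv64 build):
 * u64_stats_t wraps a plain local64_t, so updates are single
 * atomic_long operations and no sequence counter is required.
 * Hypothetical counter bump:
 */
static inline __attribute__((__unused__)) u64 stats_bump_example(u64_stats_t *bytes, unsigned long len)
{
u64_stats_add(bytes, len);
return u64_stats_read(bytes);
}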
# 131 "./include/linux/u64_stats_sync.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void u64_stats_init(struct u64_stats_sync *syncp)
{
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void u64_stats_update_begin(struct u64_stats_sync *syncp)
{





}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void u64_stats_update_end(struct u64_stats_sync *syncp)
{





}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long
u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
{
unsigned long flags = 0;
# 166 "./include/linux/u64_stats_sync.h"
return flags;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
unsigned long flags)
{







}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{



return 0;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{



return __u64_stats_fetch_begin(syncp);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
unsigned int start)
{



return false;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
unsigned int start)
{



return __u64_stats_fetch_retry(syncp, start);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{





return __u64_stats_fetch_begin(syncp);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
unsigned int start)
{





return __u64_stats_fetch_retry(syncp, start);
}
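/*
 * Canonical reader pattern for the fetch helpers above; on this 64-bit
 * build begin/retry compile to 0/false, so the loop body runs exactly
 * once. The syncp/val pairing is hypothetical.
 */
static inline __attribute__((__unused__)) u64 stats_snapshot_example(const struct u64_stats_sync *syncp, const u64_stats_t *val)
{
unsigned int start;
u64 snapshot;

do {
start = u64_stats_fetch_begin(syncp);
snapshot = u64_stats_read(val);
} while (u64_stats_fetch_retry(syncp, start));

return snapshot;
}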
# 48 "./include/net/snmp.h" 2



struct ipstats_mib {

u64 mibs[__IPSTATS_MIB_MAX];
struct u64_stats_sync syncp;
};



struct icmp_mib {
unsigned long mibs[__ICMP_MIB_MAX];
};


struct icmpmsg_mib {
atomic_long_t mibs[512];
};




struct icmpv6_mib {
unsigned long mibs[__ICMP6_MIB_MAX];
};

struct icmpv6_mib_device {
atomic_long_t mibs[__ICMP6_MIB_MAX];
};



struct icmpv6msg_mib {
atomic_long_t mibs[512];
};

struct icmpv6msg_mib_device {
atomic_long_t mibs[512];
};




struct tcp_mib {
unsigned long mibs[__TCP_MIB_MAX];
};



struct udp_mib {
unsigned long mibs[__UDP_MIB_MAX];
};



struct linux_mib {
unsigned long mibs[__LINUX_MIB_MAX];
};



struct linux_xfrm_mib {
unsigned long mibs[__LINUX_MIB_XFRMMAX];
};



struct linux_tls_mib {
unsigned long mibs[__LINUX_MIB_TLSMAX];
};
# 6 "./include/net/netns/mib.h" 2

struct netns_mib {
__typeof__(struct ipstats_mib) *ip_statistics;

__typeof__(struct ipstats_mib) *ipv6_statistics;


__typeof__(struct tcp_mib) *tcp_statistics;
__typeof__(struct linux_mib) *net_statistics;

__typeof__(struct udp_mib) *udp_statistics;

__typeof__(struct udp_mib) *udp_stats_in6;
# 31 "./include/net/netns/mib.h"
__typeof__(struct udp_mib) *udplite_statistics;

__typeof__(struct udp_mib) *udplite_stats_in6;


__typeof__(struct icmp_mib) *icmp_statistics;
__typeof__(struct icmpmsg_mib) *icmpmsg_statistics;

__typeof__(struct icmpv6_mib) *icmpv6_statistics;
__typeof__(struct icmpv6msg_mib) *icmpv6msg_statistics;
struct proc_dir_entry *proc_net_devsnmp6;

};
# 18 "./include/net/net_namespace.h" 2
# 1 "./include/net/netns/unix.h" 1







struct ctl_table_header;
struct netns_unix {
int sysctl_max_dgram_qlen;
struct ctl_table_header *ctl;
};
# 19 "./include/net/net_namespace.h" 2
# 1 "./include/net/netns/packet.h" 1
# 11 "./include/net/netns/packet.h"
struct netns_packet {
struct mutex sklist_lock;
struct hlist_head sklist;
};
# 20 "./include/net/net_namespace.h" 2
# 1 "./include/net/netns/ipv4.h" 1
# 10 "./include/net/netns/ipv4.h"
# 1 "./include/net/inet_frag.h" 1








struct fqdir {

long high_thresh;
long low_thresh;
int timeout;
int max_dist;
struct inet_frags *f;
struct net *net;
bool dead;

struct rhashtable rhashtable __attribute__((__aligned__((1 << 6))));


atomic_long_t mem __attribute__((__aligned__((1 << 6))));
struct work_struct destroy_work;
struct llist_node free_list;
};
# 35 "./include/net/inet_frag.h"
enum {
INET_FRAG_FIRST_IN = ((((1UL))) << (0)),
INET_FRAG_LAST_IN = ((((1UL))) << (1)),
INET_FRAG_COMPLETE = ((((1UL))) << (2)),
INET_FRAG_HASH_DEAD = ((((1UL))) << (3)),
};

struct frag_v4_compare_key {
__be32 saddr;
__be32 daddr;
u32 user;
u32 vif;
__be16 id;
u16 protocol;
};

struct frag_v6_compare_key {
struct in6_addr saddr;
struct in6_addr daddr;
u32 user;
__be32 id;
u32 iif;
};
# 79 "./include/net/inet_frag.h"
struct inet_frag_queue {
struct rhash_head node;
union {
struct frag_v4_compare_key v4;
struct frag_v6_compare_key v6;
} key;
struct timer_list timer;
spinlock_t lock;
refcount_t refcnt;
struct rb_root rb_fragments;
struct sk_buff *fragments_tail;
struct sk_buff *last_run_head;
ktime_t stamp;
int len;
int meat;
u8 mono_delivery_time;
__u8 flags;
u16 max_size;
struct fqdir *fqdir;
struct callback_head rcu;
};

struct inet_frags {
unsigned int qsize;

void (*constructor)(struct inet_frag_queue *q,
const void *arg);
void (*destructor)(struct inet_frag_queue *);
void (*frag_expire)(struct timer_list *t);
struct kmem_cache *frags_cachep;
const char *frags_cache_name;
struct rhashtable_params rhash_params;
refcount_t refcnt;
struct completion completion;
};

int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fqdir_pre_exit(struct fqdir *fqdir)
{



do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_218(void) ; if (!((sizeof(fqdir->high_thresh) == sizeof(char) || sizeof(fqdir->high_thresh) == sizeof(short) || sizeof(fqdir->high_thresh) == sizeof(int) || sizeof(fqdir->high_thresh) == sizeof(long)) || sizeof(fqdir->high_thresh) == sizeof(long long))) __compiletime_assert_218(); } while (0); do { *(volatile typeof(fqdir->high_thresh) *)&(fqdir->high_thresh) = (0); } while (0); } while (0);




do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_219(void) ; if (!((sizeof(fqdir->dead) == sizeof(char) || sizeof(fqdir->dead) == sizeof(short) || sizeof(fqdir->dead) == sizeof(int) || sizeof(fqdir->dead) == sizeof(long)) || sizeof(fqdir->dead) == sizeof(long long))) __compiletime_assert_219(); } while (0); do { *(volatile typeof(fqdir->dead) *)&(fqdir->dead) = (true); } while (0); } while (0);
}
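/*
 * The two do/while blocks above are the expanded form of
 * WRITE_ONCE(fqdir->high_thresh, 0) and WRITE_ONCE(fqdir->dead, true):
 * a compile-time size assertion followed by a volatile store.
 */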
void fqdir_exit(struct fqdir *fqdir);

void inet_frag_kill(struct inet_frag_queue *q);
void inet_frag_destroy(struct inet_frag_queue *q);
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key);


unsigned int inet_frag_rbtree_purge(struct rb_root *root);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inet_frag_put(struct inet_frag_queue *q)
{
if (refcount_dec_and_test(&q->refcnt))
inet_frag_destroy(q);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long frag_mem_limit(const struct fqdir *fqdir)
{
return atomic_long_read(&fqdir->mem);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sub_frag_mem_limit(struct fqdir *fqdir, long val)
{
atomic_long_sub(val, &fqdir->mem);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void add_frag_mem_limit(struct fqdir *fqdir, long val)
{
atomic_long_add(val, &fqdir->mem);
}
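/*
 * Sketch linking the helpers above (truesize is hypothetical): queued
 * fragment memory is charged against fqdir->mem, and the queue itself
 * is freed via refcount once the last user drops it.
 */
static inline __attribute__((__unused__)) void frag_accounting_example(struct inet_frag_queue *q, long truesize)
{
add_frag_mem_limit(q->fqdir, truesize);
sub_frag_mem_limit(q->fqdir, truesize);
inet_frag_put(q); /* frees q if this was the last reference */
}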
# 173 "./include/net/inet_frag.h"
extern const u8 ip_frag_ecn_table[16];





int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
int offset, int end);
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
struct sk_buff *parent);
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
void *reasm_data, bool try_coalesce);
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
# 11 "./include/net/netns/ipv4.h" 2



struct ctl_table_header;
struct ipv4_devconf;
struct fib_rules_ops;
struct hlist_head;
struct fib_table;
struct sock;
struct local_ports {
seqlock_t lock;
int range[2];
bool warned;
};

struct ping_group_range {
seqlock_t lock;
kgid_t range[2];
};

struct inet_hashinfo;

struct inet_timewait_death_row {
refcount_t tw_refcount;

struct inet_hashinfo *hashinfo __attribute__((__aligned__((1 << 6))));
int sysctl_max_tw_buckets;
};

struct tcp_fastopen_context;

struct netns_ipv4 {
struct inet_timewait_death_row *tcp_death_row;


struct ctl_table_header *forw_hdr;
struct ctl_table_header *frags_hdr;
struct ctl_table_header *ipv4_hdr;
struct ctl_table_header *route_hdr;
struct ctl_table_header *xfrm4_hdr;

struct ipv4_devconf *devconf_all;
struct ipv4_devconf *devconf_dflt;
struct ip_ra_chain *ra_chain;
struct mutex ra_mutex;







bool fib_has_custom_local_routes;
bool fib_offload_disabled;



struct hlist_head *fib_table_hash;
struct sock *fibnl;

struct sock *mc_autojoin_sk;

struct inet_peer_base *peers;
struct fqdir *fqdir;

u8 sysctl_icmp_echo_ignore_all;
u8 sysctl_icmp_echo_enable_probe;
u8 sysctl_icmp_echo_ignore_broadcasts;
u8 sysctl_icmp_ignore_bogus_error_responses;
u8 sysctl_icmp_errors_use_inbound_ifaddr;
int sysctl_icmp_ratelimit;
int sysctl_icmp_ratemask;

u32 ip_rt_min_pmtu;
int ip_rt_mtu_expires;
int ip_rt_min_advmss;

struct local_ports ip_local_ports;

u8 sysctl_tcp_ecn;
u8 sysctl_tcp_ecn_fallback;

u8 sysctl_ip_default_ttl;
u8 sysctl_ip_no_pmtu_disc;
u8 sysctl_ip_fwd_use_pmtu;
u8 sysctl_ip_fwd_update_priority;
u8 sysctl_ip_nonlocal_bind;
u8 sysctl_ip_autobind_reuse;

u8 sysctl_ip_dynaddr;
u8 sysctl_ip_early_demux;



u8 sysctl_tcp_early_demux;
u8 sysctl_udp_early_demux;

u8 sysctl_nexthop_compat_mode;

u8 sysctl_fwmark_reflect;
u8 sysctl_tcp_fwmark_accept;



u8 sysctl_tcp_mtu_probing;
int sysctl_tcp_mtu_probe_floor;
int sysctl_tcp_base_mss;
int sysctl_tcp_min_snd_mss;
int sysctl_tcp_probe_threshold;
u32 sysctl_tcp_probe_interval;

int sysctl_tcp_keepalive_time;
int sysctl_tcp_keepalive_intvl;
u8 sysctl_tcp_keepalive_probes;

u8 sysctl_tcp_syn_retries;
u8 sysctl_tcp_synack_retries;
u8 sysctl_tcp_syncookies;
u8 sysctl_tcp_migrate_req;
u8 sysctl_tcp_comp_sack_nr;
int sysctl_tcp_reordering;
u8 sysctl_tcp_retries1;
u8 sysctl_tcp_retries2;
u8 sysctl_tcp_orphan_retries;
u8 sysctl_tcp_tw_reuse;
int sysctl_tcp_fin_timeout;
unsigned int sysctl_tcp_notsent_lowat;
u8 sysctl_tcp_sack;
u8 sysctl_tcp_window_scaling;
u8 sysctl_tcp_timestamps;
u8 sysctl_tcp_early_retrans;
u8 sysctl_tcp_recovery;
u8 sysctl_tcp_thin_linear_timeouts;
u8 sysctl_tcp_slow_start_after_idle;
u8 sysctl_tcp_retrans_collapse;
u8 sysctl_tcp_stdurg;
u8 sysctl_tcp_rfc1337;
u8 sysctl_tcp_abort_on_overflow;
u8 sysctl_tcp_fack;
int sysctl_tcp_max_reordering;
int sysctl_tcp_adv_win_scale;
u8 sysctl_tcp_dsack;
u8 sysctl_tcp_app_win;
u8 sysctl_tcp_frto;
u8 sysctl_tcp_nometrics_save;
u8 sysctl_tcp_no_ssthresh_metrics_save;
u8 sysctl_tcp_moderate_rcvbuf;
u8 sysctl_tcp_tso_win_divisor;
u8 sysctl_tcp_workaround_signed_windows;
int sysctl_tcp_limit_output_bytes;
int sysctl_tcp_challenge_ack_limit;
int sysctl_tcp_min_rtt_wlen;
u8 sysctl_tcp_min_tso_segs;
u8 sysctl_tcp_tso_rtt_log;
u8 sysctl_tcp_autocorking;
u8 sysctl_tcp_reflect_tos;
int sysctl_tcp_invalid_ratelimit;
int sysctl_tcp_pacing_ss_ratio;
int sysctl_tcp_pacing_ca_ratio;
int sysctl_tcp_wmem[3];
int sysctl_tcp_rmem[3];
unsigned long sysctl_tcp_comp_sack_delay_ns;
unsigned long sysctl_tcp_comp_sack_slack_ns;
int sysctl_max_syn_backlog;
int sysctl_tcp_fastopen;
const struct tcp_congestion_ops *tcp_congestion_control;
struct tcp_fastopen_context *tcp_fastopen_ctx;
unsigned int sysctl_tcp_fastopen_blackhole_timeout;
atomic_t tfo_active_disable_times;
unsigned long tfo_active_disable_stamp;

int sysctl_udp_wmem_min;
int sysctl_udp_rmem_min;

u8 sysctl_fib_notify_on_flag_change;





u8 sysctl_igmp_llm_reports;
int sysctl_igmp_max_memberships;
int sysctl_igmp_max_msf;
int sysctl_igmp_qrv;

struct ping_group_range ping_group_range;

atomic_t dev_addr_genid;


unsigned long *sysctl_local_reserved_ports;
int sysctl_ip_prot_sock;
# 219 "./include/net/netns/ipv4.h"
struct fib_notifier_ops *notifier_ops;
unsigned int fib_seq;

struct fib_notifier_ops *ipmr_notifier_ops;
unsigned int ipmr_seq;

atomic_t rt_genid;
siphash_key_t ip_id_key;
};
# 21 "./include/net/net_namespace.h" 2
# 1 "./include/net/netns/ipv6.h" 1
# 10 "./include/net/netns/ipv6.h"
# 1 "./include/net/dst_ops.h" 1







struct dst_entry;
struct kmem_cache;
struct net_device;
struct sk_buff;
struct sock;
struct net;

struct dst_ops {
unsigned short family;
unsigned int gc_thresh;

int (*gc)(struct dst_ops *ops);
struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
unsigned int (*default_advmss)(const struct dst_entry *);
unsigned int (*mtu)(const struct dst_entry *);
u32 * (*cow_metrics)(struct dst_entry *, unsigned long);
void (*destroy)(struct dst_entry *);
void (*ifdown)(struct dst_entry *,
struct net_device *dev, int how);
struct dst_entry * (*negative_advice)(struct dst_entry *);
void (*link_failure)(struct sk_buff *);
void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu,
bool confirm_neigh);
void (*redirect)(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
int (*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
struct neighbour * (*neigh_lookup)(const struct dst_entry *dst,
struct sk_buff *skb,
const void *daddr);
void (*confirm_neigh)(const struct dst_entry *dst,
const void *daddr);

struct kmem_cache *kmem_cachep;

struct percpu_counter pcpuc_entries __attribute__((__aligned__((1 << 6))));
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dst_entries_get_fast(struct dst_ops *dst)
{
return percpu_counter_read_positive(&dst->pcpuc_entries);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dst_entries_get_slow(struct dst_ops *dst)
{
return percpu_counter_sum_positive(&dst->pcpuc_entries);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dst_entries_add(struct dst_ops *dst, int val)
{
percpu_counter_add_batch(&dst->pcpuc_entries, val,
32);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dst_entries_init(struct dst_ops *dst)
{
return ({ static struct lock_class_key __key; __percpu_counter_init(&dst->pcpuc_entries, 0, ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u)), &__key); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dst_entries_destroy(struct dst_ops *dst)
{
percpu_counter_destroy(&dst->pcpuc_entries);
}
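/*
 * Hedged sketch of the dst accounting helpers above: the fast read may
 * lag the true count by the per-CPU batch (32 per CPU), so a garbage
 * collector would confirm with the exact slow sum before acting.
 */
static inline __attribute__((__unused__)) int dst_gc_needed_example(struct dst_ops *ops)
{
if (dst_entries_get_fast(ops) < (int)ops->gc_thresh)
return 0;
/* the fast count is approximate; recheck with the exact sum */
return dst_entries_get_slow(ops) >= (int)ops->gc_thresh;
}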
# 11 "./include/net/netns/ipv6.h" 2
# 1 "./include/uapi/linux/icmpv6.h" 1







struct icmp6hdr {

__u8 icmp6_type;
__u8 icmp6_code;
__sum16 icmp6_cksum;


union {
__be32 un_data32[1];
__be16 un_data16[2];
__u8 un_data8[4];

struct icmpv6_echo {
__be16 identifier;
__be16 sequence;
} u_echo;

struct icmpv6_nd_advt {

__u32 reserved:5,
override:1,
solicited:1,
router:1,
reserved2:24;
# 40 "./include/uapi/linux/icmpv6.h"
} u_nd_advt;

struct icmpv6_nd_ra {
__u8 hop_limit;

__u8 reserved:3,
router_pref:2,
home_agent:1,
other:1,
managed:1;
# 60 "./include/uapi/linux/icmpv6.h"
__be16 rt_lifetime;
} u_nd_ra;

} icmp6_dataun;
# 81 "./include/uapi/linux/icmpv6.h"
};
# 161 "./include/uapi/linux/icmpv6.h"
struct icmp6_filter {
__u32 data[8];
};
# 12 "./include/net/netns/ipv6.h" 2

struct ctl_table_header;

struct netns_sysctl_ipv6 {

struct ctl_table_header *hdr;
struct ctl_table_header *route_hdr;
struct ctl_table_header *icmp_hdr;
struct ctl_table_header *frags_hdr;
struct ctl_table_header *xfrm6_hdr;

int flush_delay;
int ip6_rt_max_size;
int ip6_rt_gc_min_interval;
int ip6_rt_gc_timeout;
int ip6_rt_gc_interval;
int ip6_rt_gc_elasticity;
int ip6_rt_mtu_expires;
int ip6_rt_min_advmss;
u32 multipath_hash_fields;
u8 multipath_hash_policy;
u8 bindv6only;
u8 flowlabel_consistency;
u8 auto_flowlabels;
int icmpv6_time;
u8 icmpv6_echo_ignore_all;
u8 icmpv6_echo_ignore_multicast;
u8 icmpv6_echo_ignore_anycast;
unsigned long icmpv6_ratemask[(((255 + 1) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))];
unsigned long *icmpv6_ratemask_ptr;
u8 anycast_src_echo_reply;
u8 ip_nonlocal_bind;
u8 fwmark_reflect;
u8 flowlabel_state_ranges;
int idgen_retries;
int idgen_delay;
int flowlabel_reflect;
int max_dst_opts_cnt;
int max_hbh_opts_cnt;
int max_dst_opts_len;
int max_hbh_opts_len;
int seg6_flowlabel;
u32 ioam6_id;
u64 ioam6_id_wide;
bool skip_notify_on_dev_down;
u8 fib_notify_on_flag_change;
};

struct netns_ipv6 {

struct dst_ops ip6_dst_ops;

struct netns_sysctl_ipv6 sysctl;
struct ipv6_devconf *devconf_all;
struct ipv6_devconf *devconf_dflt;
struct inet_peer_base *peers;
struct fqdir *fqdir;
struct fib6_info *fib6_null_entry;
struct rt6_info *ip6_null_entry;
struct rt6_statistics *rt6_stats;
struct timer_list ip6_fib_timer;
struct hlist_head *fib_table_hash;
struct fib6_table *fib6_main_tbl;
struct list_head fib6_walkers;
rwlock_t fib6_walker_lock;
spinlock_t fib6_gc_lock;
atomic_t ip6_rt_gc_expire;
unsigned long ip6_rt_last_gc;
unsigned char flowlabel_has_excl;
# 92 "./include/net/netns/ipv6.h"
struct sock *ndisc_sk;
struct sock *tcp_sk;
struct sock *igmp_sk;
struct sock *mc_autojoin_sk;

struct hlist_head *inet6_addr_lst;
spinlock_t addrconf_hash_lock;
struct delayed_work addr_chk_work;
# 109 "./include/net/netns/ipv6.h"
atomic_t dev_addr_genid;
atomic_t fib6_sernum;
struct seg6_pernet_data *seg6_data;
struct fib_notifier_ops *notifier_ops;
struct fib_notifier_ops *ip6mr_notifier_ops;
unsigned int ipmr_seq;
struct {
struct hlist_head head;
spinlock_t lock;
u32 seq;
} ip6addrlbl_table;
struct ioam6_pernet_data *ioam6_data;
};
# 22 "./include/net/net_namespace.h" 2
# 1 "./include/net/netns/nexthop.h" 1
# 11 "./include/net/netns/nexthop.h"
struct netns_nexthop {
struct rb_root rb_root;
struct hlist_head *devhash;

unsigned int seq;
u32 last_id_allocated;
struct blocking_notifier_head notifier_chain;
};
# 23 "./include/net/net_namespace.h" 2
# 1 "./include/net/netns/ieee802154_6lowpan.h" 1
# 11 "./include/net/netns/ieee802154_6lowpan.h"
struct netns_sysctl_lowpan {

struct ctl_table_header *frags_hdr;

};

struct netns_ieee802154_lowpan {
struct netns_sysctl_lowpan sysctl;
struct fqdir *fqdir;
};
# 24 "./include/net/net_namespace.h" 2
# 1 "./include/net/netns/sctp.h" 1




struct sock;
struct proc_dir_entry;
struct sctp_mib;
struct ctl_table_header;

struct netns_sctp {
__typeof__(struct sctp_mib) *sctp_statistics;


struct proc_dir_entry *proc_net_sctp;


struct ctl_table_header *sysctl_header;





struct sock *ctl_sock;


struct sock *udp4_sock;
struct sock *udp6_sock;

int udp_port;

int encap_port;







struct list_head local_addr_list;
struct list_head addr_waitq;
struct timer_list addr_wq_timer;
struct list_head auto_asconf_splist;

spinlock_t addr_wq_lock;


spinlock_t local_addr_lock;
# 59 "./include/net/netns/sctp.h"
unsigned int rto_initial;
unsigned int rto_min;
unsigned int rto_max;




int rto_alpha;
int rto_beta;


int max_burst;


int cookie_preserve_enable;


char *sctp_hmac_alg;


unsigned int valid_cookie_life;


unsigned int sack_timeout;


unsigned int hb_interval;


unsigned int probe_interval;





int max_retrans_association;
int max_retrans_path;
int max_retrans_init;




int pf_retrans;





int ps_retrans;






int pf_enable;







int pf_expose;






int sndbuf_policy;






int rcvbuf_policy;

int default_auto_asconf;


int addip_enable;
int addip_noauth;


int prsctp_enable;


int reconf_enable;


int auth_enable;


int intl_enable;


int ecn_enable;
# 166 "./include/net/netns/sctp.h"
int scope_policy;




int rwnd_upd_shift;


unsigned long max_autoclose;
};
# 25 "./include/net/net_namespace.h" 2
# 1 "./include/net/netns/netfilter.h" 1




# 1 "./include/linux/netfilter_defs.h" 1




# 1 "./include/uapi/linux/netfilter.h" 1






# 1 "./include/linux/in.h" 1
# 19 "./include/linux/in.h"
# 1 "./include/uapi/linux/in.h" 1
# 28 "./include/uapi/linux/in.h"
enum {
IPPROTO_IP = 0,

IPPROTO_ICMP = 1,

IPPROTO_IGMP = 2,

IPPROTO_IPIP = 4,

IPPROTO_TCP = 6,

IPPROTO_EGP = 8,

IPPROTO_PUP = 12,

IPPROTO_UDP = 17,

IPPROTO_IDP = 22,

IPPROTO_TP = 29,

IPPROTO_DCCP = 33,

IPPROTO_IPV6 = 41,

IPPROTO_RSVP = 46,

IPPROTO_GRE = 47,

IPPROTO_ESP = 50,

IPPROTO_AH = 51,

IPPROTO_MTP = 92,

IPPROTO_BEETPH = 94,

IPPROTO_ENCAP = 98,

IPPROTO_PIM = 103,

IPPROTO_COMP = 108,

IPPROTO_SCTP = 132,

IPPROTO_UDPLITE = 136,

IPPROTO_MPLS = 137,

IPPROTO_ETHERNET = 143,

IPPROTO_RAW = 255,

IPPROTO_MPTCP = 262,

IPPROTO_MAX
};




struct in_addr {
__be32 s_addr;
};
# 173 "./include/uapi/linux/in.h"
struct ip_mreq {
struct in_addr imr_multiaddr;
struct in_addr imr_interface;
};

struct ip_mreqn {
struct in_addr imr_multiaddr;
struct in_addr imr_address;
int imr_ifindex;
};

struct ip_mreq_source {
__be32 imr_multiaddr;
__be32 imr_interface;
__be32 imr_sourceaddr;
};

struct ip_msfilter {
union {
struct {
__be32 imsf_multiaddr_aux;
__be32 imsf_interface_aux;
__u32 imsf_fmode_aux;
__u32 imsf_numsrc_aux;
__be32 imsf_slist[1];
};
struct {
__be32 imsf_multiaddr;
__be32 imsf_interface;
__u32 imsf_fmode;
__u32 imsf_numsrc;
__be32 imsf_slist_flex[];
};
};
};





struct group_req {
__u32 gr_interface;
struct __kernel_sockaddr_storage gr_group;
};

struct group_source_req {
__u32 gsr_interface;
struct __kernel_sockaddr_storage gsr_group;
struct __kernel_sockaddr_storage gsr_source;
};

struct group_filter {
union {
struct {
__u32 gf_interface_aux;
struct __kernel_sockaddr_storage gf_group_aux;
__u32 gf_fmode_aux;
__u32 gf_numsrc_aux;
struct __kernel_sockaddr_storage gf_slist[1];
};
struct {
__u32 gf_interface;
struct __kernel_sockaddr_storage gf_group;
__u32 gf_fmode;
__u32 gf_numsrc;
struct __kernel_sockaddr_storage gf_slist_flex[];
};
};
};







struct in_pktinfo {
int ipi_ifindex;
struct in_addr ipi_spec_dst;
struct in_addr ipi_addr;
};





struct sockaddr_in {
__kernel_sa_family_t sin_family;
__be16 sin_port;
struct in_addr sin_addr;


unsigned char __pad[16 - sizeof(short int) -
sizeof(unsigned short int) - sizeof(struct in_addr)];
};
# 20 "./include/linux/in.h" 2

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int proto_ports_offset(int proto)
{
switch (proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_DCCP:
case IPPROTO_ESP:
case IPPROTO_SCTP:
case IPPROTO_UDPLITE:
return 0;
case IPPROTO_AH:
return 4;
default:
return -22;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv4_is_loopback(__be32 addr)
{
return (addr & (( __be32)(__builtin_constant_p((__u32)((0xff000000))) ? ((__u32)( (((__u32)((0xff000000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xff000000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xff000000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xff000000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xff000000))))) == (( __be32)(__builtin_constant_p((__u32)((0x7f000000))) ? ((__u32)( (((__u32)((0x7f000000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x7f000000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x7f000000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x7f000000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x7f000000))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv4_is_multicast(__be32 addr)
{
return (addr & (( __be32)(__builtin_constant_p((__u32)((0xf0000000))) ? ((__u32)( (((__u32)((0xf0000000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xf0000000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xf0000000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xf0000000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xf0000000))))) == (( __be32)(__builtin_constant_p((__u32)((0xe0000000))) ? ((__u32)( (((__u32)((0xe0000000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xe0000000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xe0000000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xe0000000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xe0000000))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv4_is_local_multicast(__be32 addr)
{
return (addr & (( __be32)(__builtin_constant_p((__u32)((0xffffff00))) ? ((__u32)( (((__u32)((0xffffff00)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xffffff00)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xffffff00)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xffffff00)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xffffff00))))) == (( __be32)(__builtin_constant_p((__u32)((0xe0000000))) ? ((__u32)( (((__u32)((0xe0000000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xe0000000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xe0000000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xe0000000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xe0000000))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv4_is_lbcast(__be32 addr)
{

return addr == (( __be32)(__builtin_constant_p((__u32)((((unsigned long int) 0xffffffff)))) ? ((__u32)( (((__u32)((((unsigned long int) 0xffffffff))) & (__u32)0x000000ffUL) << 24) | (((__u32)((((unsigned long int) 0xffffffff))) & (__u32)0x0000ff00UL) << 8) | (((__u32)((((unsigned long int) 0xffffffff))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((((unsigned long int) 0xffffffff))) & (__u32)0xff000000UL) >> 24))) : __fswab32((((unsigned long int) 0xffffffff)))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv4_is_all_snoopers(__be32 addr)
{
return addr == (( __be32)(__builtin_constant_p((__u32)((0xe000006aU))) ? ((__u32)( (((__u32)((0xe000006aU)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xe000006aU)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xe000006aU)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xe000006aU)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xe000006aU))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv4_is_zeronet(__be32 addr)
{
return (addr == 0);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv4_is_private_10(__be32 addr)
{
return (addr & (( __be32)(__builtin_constant_p((__u32)((0xff000000))) ? ((__u32)( (((__u32)((0xff000000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xff000000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xff000000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xff000000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xff000000))))) == (( __be32)(__builtin_constant_p((__u32)((0x0a000000))) ? ((__u32)( (((__u32)((0x0a000000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x0a000000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x0a000000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x0a000000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x0a000000))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv4_is_private_172(__be32 addr)
{
return (addr & (( __be32)(__builtin_constant_p((__u32)((0xfff00000))) ? ((__u32)( (((__u32)((0xfff00000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xfff00000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xfff00000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xfff00000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xfff00000))))) == (( __be32)(__builtin_constant_p((__u32)((0xac100000))) ? ((__u32)( (((__u32)((0xac100000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xac100000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xac100000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xac100000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xac100000))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv4_is_private_192(__be32 addr)
{
return (addr & (( __be32)(__builtin_constant_p((__u32)((0xffff0000))) ? ((__u32)( (((__u32)((0xffff0000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xffff0000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xffff0000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xffff0000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xffff0000))))) == (( __be32)(__builtin_constant_p((__u32)((0xc0a80000))) ? ((__u32)( (((__u32)((0xc0a80000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xc0a80000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xc0a80000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xc0a80000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xc0a80000))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv4_is_linklocal_169(__be32 addr)
{
return (addr & (( __be32)(__builtin_constant_p((__u32)((0xffff0000))) ? ((__u32)( (((__u32)((0xffff0000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xffff0000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xffff0000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xffff0000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xffff0000))))) == (( __be32)(__builtin_constant_p((__u32)((0xa9fe0000))) ? ((__u32)( (((__u32)((0xa9fe0000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xa9fe0000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xa9fe0000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xa9fe0000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xa9fe0000))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv4_is_anycast_6to4(__be32 addr)
{
return (addr & (( __be32)(__builtin_constant_p((__u32)((0xffffff00))) ? ((__u32)( (((__u32)((0xffffff00)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xffffff00)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xffffff00)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xffffff00)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xffffff00))))) == (( __be32)(__builtin_constant_p((__u32)((0xc0586300))) ? ((__u32)( (((__u32)((0xc0586300)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xc0586300)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xc0586300)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xc0586300)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xc0586300))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv4_is_test_192(__be32 addr)
{
return (addr & (( __be32)(__builtin_constant_p((__u32)((0xffffff00))) ? ((__u32)( (((__u32)((0xffffff00)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xffffff00)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xffffff00)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xffffff00)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xffffff00))))) == (( __be32)(__builtin_constant_p((__u32)((0xc0000200))) ? ((__u32)( (((__u32)((0xc0000200)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xc0000200)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xc0000200)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xc0000200)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xc0000200))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv4_is_test_198(__be32 addr)
{
return (addr & (( __be32)(__builtin_constant_p((__u32)((0xfffe0000))) ? ((__u32)( (((__u32)((0xfffe0000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xfffe0000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xfffe0000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xfffe0000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xfffe0000))))) == (( __be32)(__builtin_constant_p((__u32)((0xc6120000))) ? ((__u32)( (((__u32)((0xc6120000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xc6120000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xc6120000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xc6120000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xc6120000))));
}
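/*
 * Reading aid for the helpers above: each __builtin_constant_p/
 * __fswab32 expression is simply the expansion of htonl() on this
 * little-endian build. ipv4_is_loopback(), for instance, is
 * (addr & htonl(0xff000000)) == htonl(0x7f000000), i.e. 127.0.0.0/8.
 * Hypothetical combined check:
 */
static inline __attribute__((__unused__)) bool addr_example(__be32 addr)
{
/* 10.1.2.3 in network byte order satisfies ipv4_is_private_10() */
return ipv4_is_private_10(addr) || ipv4_is_loopback(addr);
}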
# 8 "./include/uapi/linux/netfilter.h" 2
# 42 "./include/uapi/linux/netfilter.h"
enum nf_inet_hooks {
NF_INET_PRE_ROUTING,
NF_INET_LOCAL_IN,
NF_INET_FORWARD,
NF_INET_LOCAL_OUT,
NF_INET_POST_ROUTING,
NF_INET_NUMHOOKS,
NF_INET_INGRESS = NF_INET_NUMHOOKS,
};

enum nf_dev_hooks {
NF_NETDEV_INGRESS,
NF_NETDEV_EGRESS,
NF_NETDEV_NUMHOOKS
};

enum {
NFPROTO_UNSPEC = 0,
NFPROTO_INET = 1,
NFPROTO_IPV4 = 2,
NFPROTO_ARP = 3,
NFPROTO_NETDEV = 5,
NFPROTO_BRIDGE = 7,
NFPROTO_IPV6 = 10,
NFPROTO_DECNET = 12,
NFPROTO_NUMPROTO,
};

union nf_inet_addr {
__u32 all[4];
__be32 ip;
__be32 ip6[4];
struct in_addr in;
struct in6_addr in6;
};
# 6 "./include/linux/netfilter_defs.h" 2
# 6 "./include/net/netns/netfilter.h" 2

struct proc_dir_entry;
struct nf_logger;
struct nf_queue_handler;

struct netns_nf {

struct proc_dir_entry *proc_netfilter;

const struct nf_logger *nf_loggers[NFPROTO_NUMPROTO];

struct ctl_table_header *nf_log_dir_header;

struct nf_hook_entries *hooks_ipv4[NF_INET_NUMHOOKS];
struct nf_hook_entries *hooks_ipv6[NF_INET_NUMHOOKS];
# 36 "./include/net/netns/netfilter.h"
};
# 26 "./include/net/net_namespace.h" 2



# 1 "./include/net/netns/nftables.h" 1






struct netns_nftables {
u8 gencursor;
};
# 30 "./include/net/net_namespace.h" 2
# 1 "./include/net/netns/xfrm.h" 1








# 1 "./include/uapi/linux/xfrm.h" 1
# 15 "./include/uapi/linux/xfrm.h"
typedef union {
__be32 a4;
__be32 a6[4];
struct in6_addr in6;
} xfrm_address_t;





struct xfrm_id {
xfrm_address_t daddr;
__be32 spi;
__u8 proto;
};

struct xfrm_sec_ctx {
__u8 ctx_doi;
__u8 ctx_alg;
__u16 ctx_len;
__u32 ctx_sid;
char ctx_str[0];
};
# 49 "./include/uapi/linux/xfrm.h"
struct xfrm_selector {
xfrm_address_t daddr;
xfrm_address_t saddr;
__be16 dport;
__be16 dport_mask;
__be16 sport;
__be16 sport_mask;
__u16 family;
__u8 prefixlen_d;
__u8 prefixlen_s;
__u8 proto;
int ifindex;
__kernel_uid32_t user;
};



struct xfrm_lifetime_cfg {
__u64 soft_byte_limit;
__u64 hard_byte_limit;
__u64 soft_packet_limit;
__u64 hard_packet_limit;
__u64 soft_add_expires_seconds;
__u64 hard_add_expires_seconds;
__u64 soft_use_expires_seconds;
__u64 hard_use_expires_seconds;
};

struct xfrm_lifetime_cur {
__u64 bytes;
__u64 packets;
__u64 add_time;
__u64 use_time;
};

struct xfrm_replay_state {
__u32 oseq;
__u32 seq;
__u32 bitmap;
};



struct xfrm_replay_state_esn {
unsigned int bmp_len;
__u32 oseq;
__u32 seq;
__u32 oseq_hi;
__u32 seq_hi;
__u32 replay_window;
__u32 bmp[0];
};

struct xfrm_algo {
char alg_name[64];
unsigned int alg_key_len;
char alg_key[0];
};

struct xfrm_algo_auth {
char alg_name[64];
unsigned int alg_key_len;
unsigned int alg_trunc_len;
char alg_key[0];
};

struct xfrm_algo_aead {
char alg_name[64];
unsigned int alg_key_len;
unsigned int alg_icv_len;
char alg_key[0];
};

struct xfrm_stats {
__u32 replay_window;
__u32 replay;
__u32 integrity_failed;
};

enum {
XFRM_POLICY_TYPE_MAIN = 0,
XFRM_POLICY_TYPE_SUB = 1,
XFRM_POLICY_TYPE_MAX = 2,
XFRM_POLICY_TYPE_ANY = 255
};

enum {
XFRM_POLICY_IN = 0,
XFRM_POLICY_OUT = 1,
XFRM_POLICY_FWD = 2,
XFRM_POLICY_MASK = 3,
XFRM_POLICY_MAX = 3
};

enum {
XFRM_SHARE_ANY,
XFRM_SHARE_SESSION,
XFRM_SHARE_USER,
XFRM_SHARE_UNIQUE
};
# 158 "./include/uapi/linux/xfrm.h"
enum {
XFRM_MSG_BASE = 0x10,

XFRM_MSG_NEWSA = 0x10,

XFRM_MSG_DELSA,

XFRM_MSG_GETSA,


XFRM_MSG_NEWPOLICY,

XFRM_MSG_DELPOLICY,

XFRM_MSG_GETPOLICY,


XFRM_MSG_ALLOCSPI,

XFRM_MSG_ACQUIRE,

XFRM_MSG_EXPIRE,


XFRM_MSG_UPDPOLICY,

XFRM_MSG_UPDSA,


XFRM_MSG_POLEXPIRE,


XFRM_MSG_FLUSHSA,

XFRM_MSG_FLUSHPOLICY,


XFRM_MSG_NEWAE,

XFRM_MSG_GETAE,


XFRM_MSG_REPORT,


XFRM_MSG_MIGRATE,


XFRM_MSG_NEWSADINFO,

XFRM_MSG_GETSADINFO,


XFRM_MSG_NEWSPDINFO,

XFRM_MSG_GETSPDINFO,


XFRM_MSG_MAPPING,


XFRM_MSG_SETDEFAULT,

XFRM_MSG_GETDEFAULT,

__XFRM_MSG_MAX
};
# 233 "./include/uapi/linux/xfrm.h"
struct xfrm_user_sec_ctx {
__u16 len;
__u16 exttype;
__u8 ctx_alg;
__u8 ctx_doi;
__u16 ctx_len;
};

struct xfrm_user_tmpl {
struct xfrm_id id;
__u16 family;
xfrm_address_t saddr;
__u32 reqid;
__u8 mode;
__u8 share;
__u8 optional;
__u32 aalgos;
__u32 ealgos;
__u32 calgos;
};

struct xfrm_encap_tmpl {
__u16 encap_type;
__be16 encap_sport;
__be16 encap_dport;
xfrm_address_t encap_oa;
};


enum xfrm_ae_ftype_t {
XFRM_AE_UNSPEC,
XFRM_AE_RTHR=1,
XFRM_AE_RVAL=2,
XFRM_AE_LVAL=4,
XFRM_AE_ETHR=8,
XFRM_AE_CR=16,
XFRM_AE_CE=32,
XFRM_AE_CU=64,
__XFRM_AE_MAX


};

struct xfrm_userpolicy_type {
__u8 type;
__u16 reserved1;
__u8 reserved2;
};


enum xfrm_attr_type_t {
XFRMA_UNSPEC,
XFRMA_ALG_AUTH,
XFRMA_ALG_CRYPT,
XFRMA_ALG_COMP,
XFRMA_ENCAP,
XFRMA_TMPL,
XFRMA_SA,
XFRMA_POLICY,
XFRMA_SEC_CTX,
XFRMA_LTIME_VAL,
XFRMA_REPLAY_VAL,
XFRMA_REPLAY_THRESH,
XFRMA_ETIMER_THRESH,
XFRMA_SRCADDR,
XFRMA_COADDR,
XFRMA_LASTUSED,
XFRMA_POLICY_TYPE,
XFRMA_MIGRATE,
XFRMA_ALG_AEAD,
XFRMA_KMADDRESS,
XFRMA_ALG_AUTH_TRUNC,
XFRMA_MARK,
XFRMA_TFCPAD,
XFRMA_REPLAY_ESN_VAL,
XFRMA_SA_EXTRA_FLAGS,
XFRMA_PROTO,
XFRMA_ADDRESS_FILTER,
XFRMA_PAD,
XFRMA_OFFLOAD_DEV,
XFRMA_SET_MARK,
XFRMA_SET_MARK_MASK,
XFRMA_IF_ID,
XFRMA_MTIMER_THRESH,
__XFRMA_MAX



};

struct xfrm_mark {
__u32 v;
__u32 m;
};

enum xfrm_sadattr_type_t {
XFRMA_SAD_UNSPEC,
XFRMA_SAD_CNT,
XFRMA_SAD_HINFO,
__XFRMA_SAD_MAX


};

struct xfrmu_sadhinfo {
__u32 sadhcnt;
__u32 sadhmcnt;
};

enum xfrm_spdattr_type_t {
XFRMA_SPD_UNSPEC,
XFRMA_SPD_INFO,
XFRMA_SPD_HINFO,
XFRMA_SPD_IPV4_HTHRESH,
XFRMA_SPD_IPV6_HTHRESH,
__XFRMA_SPD_MAX


};

struct xfrmu_spdinfo {
__u32 incnt;
__u32 outcnt;
__u32 fwdcnt;
__u32 inscnt;
__u32 outscnt;
__u32 fwdscnt;
};

struct xfrmu_spdhinfo {
__u32 spdhcnt;
__u32 spdhmcnt;
};

struct xfrmu_spdhthresh {
__u8 lbits;
__u8 rbits;
};

struct xfrm_usersa_info {
struct xfrm_selector sel;
struct xfrm_id id;
xfrm_address_t saddr;
struct xfrm_lifetime_cfg lft;
struct xfrm_lifetime_cur curlft;
struct xfrm_stats stats;
__u32 seq;
__u32 reqid;
__u16 family;
__u8 mode;
__u8 replay_window;
__u8 flags;
# 393 "./include/uapi/linux/xfrm.h"
};




struct xfrm_usersa_id {
xfrm_address_t daddr;
__be32 spi;
__u16 family;
__u8 proto;
};

struct xfrm_aevent_id {
struct xfrm_usersa_id sa_id;
xfrm_address_t saddr;
__u32 flags;
__u32 reqid;
};

struct xfrm_userspi_info {
struct xfrm_usersa_info info;
__u32 min;
__u32 max;
};

struct xfrm_userpolicy_info {
struct xfrm_selector sel;
struct xfrm_lifetime_cfg lft;
struct xfrm_lifetime_cur curlft;
__u32 priority;
__u32 index;
__u8 dir;
__u8 action;


__u8 flags;



__u8 share;
};

struct xfrm_userpolicy_id {
struct xfrm_selector sel;
__u32 index;
__u8 dir;
};

struct xfrm_user_acquire {
struct xfrm_id id;
xfrm_address_t saddr;
struct xfrm_selector sel;
struct xfrm_userpolicy_info policy;
__u32 aalgos;
__u32 ealgos;
__u32 calgos;
__u32 seq;
};

struct xfrm_user_expire {
struct xfrm_usersa_info state;
__u8 hard;
};

struct xfrm_user_polexpire {
struct xfrm_userpolicy_info pol;
__u8 hard;
};

struct xfrm_usersa_flush {
__u8 proto;
};

struct xfrm_user_report {
__u8 proto;
struct xfrm_selector sel;
};



struct xfrm_user_kmaddress {
xfrm_address_t local;
xfrm_address_t remote;
__u32 reserved;
__u16 family;
};

struct xfrm_user_migrate {
xfrm_address_t old_daddr;
xfrm_address_t old_saddr;
xfrm_address_t new_daddr;
xfrm_address_t new_saddr;
__u8 proto;
__u8 mode;
__u16 reserved;
__u32 reqid;
__u16 old_family;
__u16 new_family;
};

struct xfrm_user_mapping {
struct xfrm_usersa_id id;
__u32 reqid;
xfrm_address_t old_saddr;
xfrm_address_t new_saddr;
__be16 old_sport;
__be16 new_sport;
};

struct xfrm_address_filter {
xfrm_address_t saddr;
xfrm_address_t daddr;
__u16 family;
__u8 splen;
__u8 dplen;
};

struct xfrm_user_offload {
int ifindex;
__u8 flags;
};
# 523 "./include/uapi/linux/xfrm.h"
struct xfrm_userpolicy_default {



__u8 in;
__u8 fwd;
__u8 out;
};
# 541 "./include/uapi/linux/xfrm.h"
enum xfrm_nlgroups {
XFRMNLGRP_NONE,

XFRMNLGRP_ACQUIRE,

XFRMNLGRP_EXPIRE,

XFRMNLGRP_SA,

XFRMNLGRP_POLICY,

XFRMNLGRP_AEVENTS,

XFRMNLGRP_REPORT,

XFRMNLGRP_MIGRATE,

XFRMNLGRP_MAPPING,

__XFRMNLGRP_MAX
};
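/* xfrm_nlgroups above lists the xfrm netlink multicast groups that
 * mirror the message types. The include stack now returns to
 * include/net/netns/xfrm.h for the per-namespace IPsec state. */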
# 10 "./include/net/netns/xfrm.h" 2


struct ctl_table_header;

struct xfrm_policy_hash {
struct hlist_head *table;
unsigned int hmask;
u8 dbits4;
u8 sbits4;
u8 dbits6;
u8 sbits6;
};

struct xfrm_policy_hthresh {
struct work_struct work;
seqlock_t lock;
u8 lbits4;
u8 rbits4;
u8 lbits6;
u8 rbits6;
};

struct netns_xfrm {
struct list_head state_all;
# 42 "./include/net/netns/xfrm.h"
struct hlist_head *state_bydst;
struct hlist_head *state_bysrc;
struct hlist_head *state_byspi;
struct hlist_head *state_byseq;
unsigned int state_hmask;
unsigned int state_num;
struct work_struct state_hash_work;

struct list_head policy_all;
struct hlist_head *policy_byidx;
unsigned int policy_idx_hmask;
struct hlist_head policy_inexact[XFRM_POLICY_MAX];
struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX];
unsigned int policy_count[XFRM_POLICY_MAX * 2];
struct work_struct policy_hash_work;
struct xfrm_policy_hthresh policy_hthresh;
struct list_head inexact_bins;


struct sock *nlsk;
struct sock *nlsk_stash;

u32 sysctl_aevent_etime;
u32 sysctl_aevent_rseqth;
int sysctl_larval_drop;
u32 sysctl_acq_expires;

u8 policy_default[XFRM_POLICY_MAX];


struct ctl_table_header *sysctl_hdr;


struct dst_ops xfrm4_dst_ops;

struct dst_ops xfrm6_dst_ops;

spinlock_t xfrm_state_lock;
seqcount_spinlock_t xfrm_state_hash_generation;
seqcount_spinlock_t xfrm_policy_hash_generation;

spinlock_t xfrm_policy_lock;
struct mutex xfrm_cfg_mutex;
};
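/* netns_xfrm bundles the per-network-namespace IPsec state: the SAD
 * hash tables (state_by*), the SPD (policy_*), the xfrm netlink
 * socket, sysctl knobs, and the locks/seqcounts that guard them. */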
# 31 "./include/net/net_namespace.h" 2
# 1 "./include/net/netns/mpls.h" 1








struct mpls_route;
struct ctl_table_header;

struct netns_mpls {
int ip_ttl_propagate;
int default_ttl;
size_t platform_labels;
struct mpls_route * *platform_label;

struct ctl_table_header *ctl;
};
# 32 "./include/net/net_namespace.h" 2
# 1 "./include/net/netns/can.h" 1
# 11 "./include/net/netns/can.h"
struct can_dev_rcv_lists;
struct can_pkg_stats;
struct can_rcv_lists_stats;

struct netns_can {

struct proc_dir_entry *proc_dir;
struct proc_dir_entry *pde_stats;
struct proc_dir_entry *pde_reset_stats;
struct proc_dir_entry *pde_rcvlist_all;
struct proc_dir_entry *pde_rcvlist_fil;
struct proc_dir_entry *pde_rcvlist_inv;
struct proc_dir_entry *pde_rcvlist_sff;
struct proc_dir_entry *pde_rcvlist_eff;
struct proc_dir_entry *pde_rcvlist_err;
struct proc_dir_entry *bcmproc_dir;



struct can_dev_rcv_lists *rx_alldev_list;
spinlock_t rcvlists_lock;
struct timer_list stattimer;
struct can_pkg_stats *pkg_stats;
struct can_rcv_lists_stats *rcv_lists_stats;


struct hlist_head cgw_list;
};
# 33 "./include/net/net_namespace.h" 2
# 1 "./include/net/netns/xdp.h" 1







struct netns_xdp {
struct mutex lock;
struct hlist_head list;
};
# 34 "./include/net/net_namespace.h" 2
# 1 "./include/net/netns/smc.h" 1






struct smc_stats_rsn;
struct smc_stats;
struct netns_smc {

struct smc_stats *smc_stats;

struct mutex mutex_fback_rsn;
struct smc_stats_rsn *fback_rsn;

bool limit_smc_hs;

struct ctl_table_header *smc_hdr;

unsigned int sysctl_autocorking_size;
};
# 35 "./include/net/net_namespace.h" 2
# 1 "./include/net/netns/bpf.h" 1
# 11 "./include/net/netns/bpf.h"
struct bpf_prog;
struct bpf_prog_array;

enum netns_bpf_attach_type {
NETNS_BPF_INVALID = -1,
NETNS_BPF_FLOW_DISSECTOR = 0,
NETNS_BPF_SK_LOOKUP,
MAX_NETNS_BPF_ATTACH_TYPE
};

struct netns_bpf {

struct bpf_prog_array *run_array[MAX_NETNS_BPF_ATTACH_TYPE];
struct bpf_prog *progs[MAX_NETNS_BPF_ATTACH_TYPE];
struct list_head links[MAX_NETNS_BPF_ATTACH_TYPE];
};
# 36 "./include/net/net_namespace.h" 2
# 1 "./include/net/netns/mctp.h" 1
# 11 "./include/net/netns/mctp.h"
struct netns_mctp {

struct list_head routes;





struct mutex bind_lock;
struct hlist_head binds;




spinlock_t keys_lock;
struct hlist_head keys;


unsigned int default_net;


struct mutex neigh_lock;
struct list_head neighbours;
};
# 37 "./include/net/net_namespace.h" 2
# 1 "./include/net/net_trackers.h" 1



# 1 "./include/linux/ref_tracker.h" 1








struct ref_tracker;

struct ref_tracker_dir {
# 21 "./include/linux/ref_tracker.h"
};
# 50 "./include/linux/ref_tracker.h"
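/* The empty ref_tracker_dir and the no-op stubs below suggest this
 * configuration has CONFIG_REF_TRACKER disabled. Note also the
 * attribute string repeated on every inline function in this file:
 * it is the expansion of the kernel's 'inline' macro, gnu_inline +
 * unused plus notrace, where notrace here expands to
 * patchable_function_entry(0, 0) so these helpers get no patchable
 * NOP prologue for ftrace. */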
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ref_tracker_dir_init(struct ref_tracker_dir *dir,
unsigned int quarantine_count)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ref_tracker_dir_print(struct ref_tracker_dir *dir,
unsigned int display_limit)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ref_tracker_alloc(struct ref_tracker_dir *dir,
struct ref_tracker **trackerp,
gfp_t gfp)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ref_tracker_free(struct ref_tracker_dir *dir,
struct ref_tracker **trackerp)
{
return 0;
}
# 5 "./include/net/net_trackers.h" 2




typedef struct {} netdevice_tracker;





typedef struct {} netns_tracker;
# 38 "./include/net/net_namespace.h" 2
# 1 "./include/linux/ns_common.h" 1






struct proc_ns_operations;

struct ns_common {
atomic_long_t stashed;
const struct proc_ns_operations *ops;
unsigned int inum;
refcount_t count;
};
# 39 "./include/net/net_namespace.h" 2
# 1 "./include/linux/idr.h" 1
# 19 "./include/linux/idr.h"
struct idr {
struct xarray idr_rt;
unsigned int idr_base;
unsigned int idr_next;
};
# 66 "./include/linux/idr.h"
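/* In idr_get_cursor()/idr_set_cursor() below, the _Generic and
 * __compiletime_assert_NNN blobs are the preprocessed expansions of
 * READ_ONCE()/WRITE_ONCE(): a compile-time check that the accessed
 * field has a scalar size, followed by a volatile load or store. */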
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int idr_get_cursor(const struct idr *idr)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_220(void) ; if (!((sizeof(idr->idr_next) == sizeof(char) || sizeof(idr->idr_next) == sizeof(short) || sizeof(idr->idr_next) == sizeof(int) || sizeof(idr->idr_next) == sizeof(long)) || sizeof(idr->idr_next) == sizeof(long long))) __compiletime_assert_220(); } while (0); (*(const volatile typeof( _Generic((idr->idr_next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (idr->idr_next))) *)&(idr->idr_next)); });
}
# 79 "./include/linux/idr.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void idr_set_cursor(struct idr *idr, unsigned int val)
{
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_221(void) ; if (!((sizeof(idr->idr_next) == sizeof(char) || sizeof(idr->idr_next) == sizeof(short) || sizeof(idr->idr_next) == sizeof(int) || sizeof(idr->idr_next) == sizeof(long)) || sizeof(idr->idr_next) == sizeof(long long))) __compiletime_assert_221(); } while (0); do { *(volatile typeof(idr->idr_next) *)&(idr->idr_next) = (val); } while (0); } while (0);
}
# 112 "./include/linux/idr.h"
void idr_preload(gfp_t gfp_mask);

int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
int __attribute__((__warn_unused_result__)) idr_alloc_u32(struct idr *, void *ptr, u32 *id,
unsigned long max, gfp_t);
int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
void *idr_remove(struct idr *, unsigned long id);
void *idr_find(const struct idr *, unsigned long id);
int idr_for_each(const struct idr *,
int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
void *idr_get_next_ul(struct idr *, unsigned long *nextid);
void *idr_replace(struct idr *, void *, unsigned long id);
void idr_destroy(struct idr *);
# 135 "./include/linux/idr.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void idr_init_base(struct idr *idr, int base)
{
xa_init_flags(&idr->idr_rt, ((( gfp_t)4) | ( gfp_t) (1 << (((27 + 1)) + 0))));
idr->idr_base = base;
idr->idr_next = 0;
}
# 149 "./include/linux/idr.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void idr_init(struct idr *idr)
{
idr_init_base(idr, 0);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool idr_is_empty(const struct idr *idr)
{
return radix_tree_empty(&idr->idr_rt) &&
radix_tree_tagged(&idr->idr_rt, 0);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void idr_preload_end(void)
{
do { local_lock_release(({ do { const void *__vpp_verify = (typeof((&radix_tree_preloads.lock) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&radix_tree_preloads.lock)) *)(&radix_tree_preloads.lock)); (typeof((typeof(*(&radix_tree_preloads.lock)) *)(&radix_tree_preloads.lock))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })); do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0);
}
# 240 "./include/linux/idr.h"
struct ida_bitmap {
unsigned long bitmap[(128 / sizeof(long))];
};

struct ida {
struct xarray xa;
};
# 255 "./include/linux/idr.h"
int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
void ida_free(struct ida *, unsigned int id);
void ida_destroy(struct ida *ida);
# 271 "./include/linux/idr.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ida_alloc(struct ida *ida, gfp_t gfp)
{
return ida_alloc_range(ida, 0, ~0, gfp);
}
# 289 "./include/linux/idr.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
{
return ida_alloc_range(ida, min, ~0, gfp);
}
# 307 "./include/linux/idr.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
{
return ida_alloc_range(ida, 0, max, gfp);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ida_init(struct ida *ida)
{
xa_init_flags(&ida->xa, ((( gfp_t)XA_LOCK_IRQ) | ((( gfp_t)4U) | (( gfp_t)((1U << (27 + 1)) << ( unsigned)((( xa_mark_t)0U)))))));
}
# 325 "./include/linux/idr.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ida_is_empty(const struct ida *ida)
{
return xa_empty(&ida->xa);
}
# 40 "./include/net/net_namespace.h" 2
# 1 "./include/linux/skbuff.h" 1
# 17 "./include/linux/skbuff.h"
# 1 "./include/linux/bvec.h" 1
# 10 "./include/linux/bvec.h"
# 1 "./include/linux/highmem.h" 1







# 1 "./include/linux/cacheflush.h" 1




# 1 "./arch/riscv/include/asm/cacheflush.h" 1
# 11 "./arch/riscv/include/asm/cacheflush.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void local_flush_icache_all(void)
{
asm volatile ("fence.i" ::: "memory");
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flush_dcache_page(struct page *page)
{
if (arch_test_bit(PG_arch_1, &page->flags))
clear_bit(PG_arch_1, &page->flags);
}
# 40 "./arch/riscv/include/asm/cacheflush.h"
void flush_icache_all(void);
void flush_icache_mm(struct mm_struct *mm, bool local);
# 51 "./arch/riscv/include/asm/cacheflush.h"
# 1 "./include/asm-generic/cacheflush.h" 1




struct mm_struct;
struct vm_area_struct;
struct page;
struct address_space;






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flush_cache_all(void)
{
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flush_cache_mm(struct mm_struct *mm)
{
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flush_cache_dup_mm(struct mm_struct *mm)
{
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flush_cache_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
{
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flush_cache_page(struct vm_area_struct *vma,
unsigned long vmaddr,
unsigned long pfn)
{
}
# 57 "./include/asm-generic/cacheflush.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flush_dcache_mmap_lock(struct address_space *mapping)
{
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
# 79 "./include/asm-generic/cacheflush.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flush_icache_page(struct vm_area_struct *vma,
struct page *page)
{
}
# 94 "./include/asm-generic/cacheflush.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flush_cache_vmap(unsigned long start, unsigned long end)
{
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
# 52 "./arch/riscv/include/asm/cacheflush.h" 2
# 6 "./include/linux/cacheflush.h" 2

struct folio;



void flush_dcache_folio(struct folio *folio);
# 9 "./include/linux/highmem.h" 2


# 1 "./include/linux/hardirq.h" 1




# 1 "./include/linux/context_tracking_state.h" 1





# 1 "./include/linux/static_key.h" 1
# 7 "./include/linux/context_tracking_state.h" 2

struct context_tracking {






bool active;
int recursion;
enum ctx_state {
CONTEXT_DISABLED = -1,
CONTEXT_KERNEL = 0,
CONTEXT_USER,
CONTEXT_GUEST,
} state;
};
# 49 "./include/linux/context_tracking_state.h"
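/* Stub predicates: context tracking appears to be compiled out in
 * this configuration, so all four helpers constant-fold to false. */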
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool context_tracking_in_user(void) { return false; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool context_tracking_enabled(void) { return false; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool context_tracking_enabled_cpu(int cpu) { return false; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool context_tracking_enabled_this_cpu(void) { return false; }
# 6 "./include/linux/hardirq.h" 2


# 1 "./include/linux/ftrace_irq.h" 1
# 11 "./include/linux/ftrace_irq.h"
extern bool trace_osnoise_callback_enabled;
extern void trace_osnoise_callback(bool enter);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ftrace_nmi_enter(void)
{





if (trace_osnoise_callback_enabled)
trace_osnoise_callback(true);

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ftrace_nmi_exit(void)
{





if (trace_osnoise_callback_enabled)
trace_osnoise_callback(false);

}
# 9 "./include/linux/hardirq.h" 2

# 1 "./include/linux/vtime.h" 1
# 28 "./include/linux/vtime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vtime_user_enter(struct task_struct *tsk) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vtime_user_exit(struct task_struct *tsk) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vtime_guest_enter(struct task_struct *tsk) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vtime_guest_exit(struct task_struct *tsk) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vtime_init_idle(struct task_struct *tsk, int cpu) { }
# 41 "./include/linux/vtime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vtime_account_softirq(struct task_struct *tsk) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vtime_account_hardirq(struct task_struct *tsk) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vtime_flush(struct task_struct *tsk) { }
# 116 "./include/linux/vtime.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool vtime_accounting_enabled_this_cpu(void) { return false; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vtime_task_switch(struct task_struct *prev) { }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void vtime_account_guest_enter(void)
{
get_current()->flags |= 0x00000001;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void vtime_account_guest_exit(void)
{
get_current()->flags &= ~0x00000001;
}





extern void irqtime_account_irq(struct task_struct *tsk, unsigned int offset);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void account_softirq_enter(struct task_struct *tsk)
{
vtime_account_irq(tsk, (1UL << (0 + 8)));
irqtime_account_irq(tsk, (1UL << (0 + 8)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void account_softirq_exit(struct task_struct *tsk)
{
vtime_account_softirq(tsk);
irqtime_account_irq(tsk, 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void account_hardirq_enter(struct task_struct *tsk)
{
vtime_account_irq(tsk, (1UL << ((0 + 8) + 8)));
irqtime_account_irq(tsk, (1UL << ((0 + 8) + 8)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void account_hardirq_exit(struct task_struct *tsk)
{
vtime_account_hardirq(tsk);
irqtime_account_irq(tsk, 0);
}
# 11 "./include/linux/hardirq.h" 2
# 1 "./arch/riscv/include/generated/asm/hardirq.h" 1
# 1 "./include/asm-generic/hardirq.h" 1







typedef struct {
unsigned int __softirq_pending;



} __attribute__((__aligned__((1 << 6)))) irq_cpustat_t;

extern __attribute__((section(".data..percpu" "..shared_aligned"))) __typeof__(irq_cpustat_t) irq_stat __attribute__((__aligned__((1 << 6))));
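/* The section attribute above is the expansion of
 * DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat): one
 * cacheline-aligned (1 << 6 bytes) softirq-pending word per CPU. */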


# 1 "./include/linux/irq.h" 1
# 16 "./include/linux/irq.h"
# 1 "./include/linux/irqhandler.h" 1
# 10 "./include/linux/irqhandler.h"
struct irq_desc;
struct irq_data;
typedef void (*irq_flow_handler_t)(struct irq_desc *desc);
# 17 "./include/linux/irq.h" 2



# 1 "./include/linux/io.h" 1
# 13 "./include/linux/io.h"
# 1 "./arch/riscv/include/asm/io.h" 1
# 17 "./arch/riscv/include/asm/io.h"
# 1 "./arch/riscv/include/generated/asm/early_ioremap.h" 1
# 1 "./include/asm-generic/early_ioremap.h" 1
# 11 "./include/asm-generic/early_ioremap.h"
extern void *early_ioremap(resource_size_t phys_addr,
unsigned long size);
extern void *early_memremap(resource_size_t phys_addr,
unsigned long size);
extern void *early_memremap_ro(resource_size_t phys_addr,
unsigned long size);
extern void *early_memremap_prot(resource_size_t phys_addr,
unsigned long size, unsigned long prot_val);
extern void early_iounmap(void *addr, unsigned long size);
extern void early_memunmap(void *addr, unsigned long size);



extern void early_ioremap_init(void);


extern void early_ioremap_setup(void);





extern void early_ioremap_reset(void);




extern void copy_from_early_mem(void *dest, phys_addr_t src,
unsigned long size);
# 2 "./arch/riscv/include/generated/asm/early_ioremap.h" 2
# 18 "./arch/riscv/include/asm/io.h" 2





# 1 "./arch/riscv/include/asm/mmio.h" 1
# 19 "./arch/riscv/include/asm/mmio.h"
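/* arch/riscv raw MMIO accessors: each __raw_write{b,w,l,q} and
 * __raw_read{b,w,l,q} is a single sb/sh/sw/sd or lb/lh/lw/ld
 * instruction issued through inline asm; the memory-ordering fences
 * are supplied by the callers. */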
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_writeb(u8 val, volatile void *addr)
{
asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_writew(u16 val, volatile void *addr)
{
asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_writel(u32 val, volatile void *addr)
{
asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __raw_writeq(u64 val, volatile void *addr)
{
asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u8 __raw_readb(const volatile void *addr)
{
u8 val;

asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
return val;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u16 __raw_readw(const volatile void *addr)
{
u16 val;

asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
return val;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 __raw_readl(const volatile void *addr)
{
u32 val;

asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
return val;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 __raw_readq(const volatile void *addr)
{
u64 val;

asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
return val;
}
# 24 "./arch/riscv/include/asm/io.h" 2
# 94 "./arch/riscv/include/asm/io.h"
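/* The one-line bodies below appear to come from the generator macros
 * (__io_reads_ins() / __io_writes_outs()) in
 * arch/riscv/include/asm/io.h: an ordering fence, a tight load/store
 * loop over 'count' elements, then a completion fence or
 * mmiowb_set_pending() on the write side. */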
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __readsb(const volatile void *addr, void *buffer, unsigned int count) { do {} while (0); if (count) { u8 *buf = buffer; do { u8 x = __raw_readb(addr); *buf++ = x; } while (--count); } __asm__ __volatile__ ("fence i,r" : : : "memory"); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __readsw(const volatile void *addr, void *buffer, unsigned int count) { do {} while (0); if (count) { u16 *buf = buffer; do { u16 x = __raw_readw(addr); *buf++ = x; } while (--count); } __asm__ __volatile__ ("fence i,r" : : : "memory"); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __readsl(const volatile void *addr, void *buffer, unsigned int count) { do {} while (0); if (count) { u32 *buf = buffer; do { u32 x = __raw_readl(addr); *buf++ = x; } while (--count); } __asm__ __volatile__ ("fence i,r" : : : "memory"); }




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __insb(const volatile void *addr, void *buffer, unsigned int count) { __asm__ __volatile__ ("fence io,i" : : : "memory");; if (count) { u8 *buf = buffer; do { u8 x = __raw_readb(addr); *buf++ = x; } while (--count); } __asm__ __volatile__ ("fence i,ior" : : : "memory");; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __insw(const volatile void *addr, void *buffer, unsigned int count) { __asm__ __volatile__ ("fence io,i" : : : "memory");; if (count) { u16 *buf = buffer; do { u16 x = __raw_readw(addr); *buf++ = x; } while (--count); } __asm__ __volatile__ ("fence i,ior" : : : "memory");; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __insl(const volatile void *addr, void *buffer, unsigned int count) { __asm__ __volatile__ ("fence io,i" : : : "memory");; if (count) { u32 *buf = buffer; do { u32 x = __raw_readl(addr); *buf++ = x; } while (--count); } __asm__ __volatile__ ("fence i,ior" : : : "memory");; }




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __writesb(volatile void *addr, const void *buffer, unsigned int count) { __asm__ __volatile__ ("fence w,o" : : : "memory"); if (count) { const u8 *buf = buffer; do { __raw_writeb(*buf++, addr); } while (--count); } mmiowb_set_pending(); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __writesw(volatile void *addr, const void *buffer, unsigned int count) { __asm__ __volatile__ ("fence w,o" : : : "memory"); if (count) { const u16 *buf = buffer; do { __raw_writew(*buf++, addr); } while (--count); } mmiowb_set_pending(); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __writesl(volatile void *addr, const void *buffer, unsigned int count) { __asm__ __volatile__ ("fence w,o" : : : "memory"); if (count) { const u32 *buf = buffer; do { __raw_writel(*buf++, addr); } while (--count); } mmiowb_set_pending(); }




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __outsb(volatile void *addr, const void *buffer, unsigned int count) { __asm__ __volatile__ ("fence iow,o" : : : "memory");; if (count) { const u8 *buf = buffer; do { __raw_writeb(*buf++, addr); } while (--count); } __asm__ __volatile__ ("fence o,io" : : : "memory");; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __outsw(volatile void *addr, const void *buffer, unsigned int count) { __asm__ __volatile__ ("fence iow,o" : : : "memory");; if (count) { const u16 *buf = buffer; do { __raw_writew(*buf++, addr); } while (--count); } __asm__ __volatile__ ("fence o,io" : : : "memory");; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __outsl(volatile void *addr, const void *buffer, unsigned int count) { __asm__ __volatile__ ("fence iow,o" : : : "memory");; if (count) { const u32 *buf = buffer; do { __raw_writel(*buf++, addr); } while (--count); } __asm__ __volatile__ ("fence o,io" : : : "memory");; }





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __readsq(const volatile void *addr, void *buffer, unsigned int count) { do {} while (0); if (count) { u64 *buf = buffer; do { u64 x = __raw_readq(addr); *buf++ = x; } while (--count); } __asm__ __volatile__ ("fence i,r" : : : "memory"); }


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __insq(const volatile void *addr, void *buffer, unsigned int count) { __asm__ __volatile__ ("fence io,i" : : : "memory");; if (count) { u64 *buf = buffer; do { u64 x = __raw_readq(addr); *buf++ = x; } while (--count); } __asm__ __volatile__ ("fence i,ior" : : : "memory");; }


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __writesq(volatile void *addr, const void *buffer, unsigned int count) { __asm__ __volatile__ ("fence w,o" : : : "memory"); if (count) { const u64 *buf = buffer; do { __raw_writeq(*buf++, addr); } while (--count); } mmiowb_set_pending(); }


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __outsq(volatile void *addr, const void *buffer, unsigned int count) { __asm__ __volatile__ ("fence io,i" : : : "memory");; if (count) { const u64 *buf = buffer; do { __raw_writeq(*buf++, addr); } while (--count); } __asm__ __volatile__ ("fence o,io" : : : "memory");; }




# 1 "./include/asm-generic/io.h" 1
# 19 "./include/asm-generic/io.h"
# 1 "./include/asm-generic/pci_iomap.h" 1
# 10 "./include/asm-generic/pci_iomap.h"
struct pci_dev;


extern void *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
extern void *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max);
extern void *pci_iomap_range(struct pci_dev *dev, int bar,
unsigned long offset,
unsigned long maxlen);
extern void *pci_iomap_wc_range(struct pci_dev *dev, int bar,
unsigned long offset,
unsigned long maxlen);
extern void pci_iounmap(struct pci_dev *dev, void *);
# 20 "./include/asm-generic/io.h" 2
# 459 "./include/asm-generic/io.h"
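/* Legacy port I/O from asm-generic/io.h. The enormous pointer
 * expression repeated in _inb()/_outb() and friends appears to be the
 * inlined value of PCI_IOBASE on riscv, derived from
 * kernel_map.page_offset and the vmemmap sizing; each accessor
 * brackets the raw access with the fences required by port-I/O
 * ordering, and the 16/32-bit variants convert from little-endian. */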
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u8 _inb(unsigned long addr)
{
u8 val;

__asm__ __volatile__ ("fence io,i" : : : "memory");;
val = __raw_readb(((void *)(((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) )))))) - 0x01000000)) + addr);
__asm__ __volatile__ ("fence i,ior" : : : "memory");;
return val;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u16 _inw(unsigned long addr)
{
u16 val;

__asm__ __volatile__ ("fence io,i" : : : "memory");;
val = (( __u16)(__le16)((__le16 )__raw_readw(((void *)(((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) )))))) - 0x01000000)) + addr)));
__asm__ __volatile__ ("fence i,ior" : : : "memory");;
return val;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 _inl(unsigned long addr)
{
u32 val;

__asm__ __volatile__ ("fence io,i" : : : "memory");;
val = (( __u32)(__le32)((__le32 )__raw_readl(((void *)(((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) )))))) - 0x01000000)) + addr)));
__asm__ __volatile__ ("fence i,ior" : : : "memory");;
return val;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void _outb(u8 value, unsigned long addr)
{
__asm__ __volatile__ ("fence iow,o" : : : "memory");;
__raw_writeb(value, ((void *)(((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) )))))) - 0x01000000)) + addr);
__asm__ __volatile__ ("fence o,io" : : : "memory");;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void _outw(u16 value, unsigned long addr)
{
__asm__ __volatile__ ("fence iow,o" : : : "memory");;
__raw_writew((u16 )(( __le16)(__u16)(value)), ((void *)(((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) )))))) - 0x01000000)) + addr);
__asm__ __volatile__ ("fence o,io" : : : "memory");;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void _outl(u32 value, unsigned long addr)
{
__asm__ __volatile__ ("fence iow,o" : : : "memory");;
__raw_writel((u32 )(( __le32)(__u32)(value)), ((void *)(((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) )))))) - 0x01000000)) + addr);
__asm__ __volatile__ ("fence o,io" : : : "memory");;
}



# 1 "./include/linux/logic_pio.h" 1
# 11 "./include/linux/logic_pio.h"
# 1 "./include/linux/fwnode.h" 1
# 17 "./include/linux/fwnode.h"
struct fwnode_operations;
struct device;
# 36 "./include/linux/fwnode.h"
struct fwnode_handle {
struct fwnode_handle *secondary;
const struct fwnode_operations *ops;
struct device *dev;
struct list_head suppliers;
struct list_head consumers;
u8 flags;
};

struct fwnode_link {
struct fwnode_handle *supplier;
struct list_head s_hook;
struct fwnode_handle *consumer;
struct list_head c_hook;
};







struct fwnode_endpoint {
unsigned int port;
unsigned int id;
const struct fwnode_handle *local_fwnode;
};
# 79 "./include/linux/fwnode.h"
struct fwnode_reference_args {
struct fwnode_handle *fwnode;
unsigned int nargs;
u64 args[8];
};
# 110 "./include/linux/fwnode.h"
struct fwnode_operations {
struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
void (*put)(struct fwnode_handle *fwnode);
bool (*device_is_available)(const struct fwnode_handle *fwnode);
const void *(*device_get_match_data)(const struct fwnode_handle *fwnode,
const struct device *dev);
bool (*property_present)(const struct fwnode_handle *fwnode,
const char *propname);
int (*property_read_int_array)(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
size_t nval);
int
(*property_read_string_array)(const struct fwnode_handle *fwnode_handle,
const char *propname, const char **val,
size_t nval);
const char *(*get_name)(const struct fwnode_handle *fwnode);
const char *(*get_name_prefix)(const struct fwnode_handle *fwnode);
struct fwnode_handle *(*get_parent)(const struct fwnode_handle *fwnode);
struct fwnode_handle *
(*get_next_child_node)(const struct fwnode_handle *fwnode,
struct fwnode_handle *child);
struct fwnode_handle *
(*get_named_child_node)(const struct fwnode_handle *fwnode,
const char *name);
int (*get_reference_args)(const struct fwnode_handle *fwnode,
const char *prop, const char *nargs_prop,
unsigned int nargs, unsigned int index,
struct fwnode_reference_args *args);
struct fwnode_handle *
(*graph_get_next_endpoint)(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev);
struct fwnode_handle *
(*graph_get_remote_endpoint)(const struct fwnode_handle *fwnode);
struct fwnode_handle *
(*graph_get_port_parent)(struct fwnode_handle *fwnode);
int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
int (*add_links)(struct fwnode_handle *fwnode);
};
# 172 "./include/linux/fwnode.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fwnode_init(struct fwnode_handle *fwnode,
const struct fwnode_operations *ops)
{
fwnode->ops = ops;
INIT_LIST_HEAD(&fwnode->consumers);
INIT_LIST_HEAD(&fwnode->suppliers);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fwnode_dev_initialized(struct fwnode_handle *fwnode,
bool initialized)
{
if (IS_ERR_OR_NULL(fwnode))
return;

if (initialized)
fwnode->flags |= ((((1UL))) << (2));
else
fwnode->flags &= ~((((1UL))) << (2));
}

extern u32 fw_devlink_get_flags(void);
extern bool fw_devlink_is_strict(void);
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup);
void fwnode_links_purge(struct fwnode_handle *fwnode);
void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode);
# 12 "./include/linux/logic_pio.h" 2

enum {
LOGIC_PIO_INDIRECT,
LOGIC_PIO_CPU_MMIO,
};

struct logic_pio_hwaddr {
struct list_head list;
struct fwnode_handle *fwnode;
resource_size_t hw_start;
resource_size_t io_start;
resource_size_t size;
unsigned long flags;

void *hostdata;
const struct logic_pio_host_ops *ops;
};

struct logic_pio_host_ops {
u32 (*in)(void *hostdata, unsigned long addr, size_t dwidth);
void (*out)(void *hostdata, unsigned long addr, u32 val,
size_t dwidth);
u32 (*ins)(void *hostdata, unsigned long addr, void *buffer,
size_t dwidth, unsigned int count);
void (*outs)(void *hostdata, unsigned long addr, const void *buffer,
size_t dwidth, unsigned int count);
};
# 116 "./include/linux/logic_pio.h"
struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode);
unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
resource_size_t hw_addr, resource_size_t size);
int logic_pio_register_range(struct logic_pio_hwaddr *newrange);
void logic_pio_unregister_range(struct logic_pio_hwaddr *range);
resource_size_t logic_pio_to_hwaddr(unsigned long pio);
unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr);
# 527 "./include/asm-generic/io.h" 2
# 554 "./include/asm-generic/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u8 inb_p(unsigned long addr)
{
return _inb(addr);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u16 inw_p(unsigned long addr)
{
return _inw(addr);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 inl_p(unsigned long addr)
{
return _inl(addr);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void outb_p(u8 value, unsigned long addr)
{
_outb(value, addr);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void outw_p(u16 value, unsigned long addr)
{
_outw(value, addr);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void outl_p(u32 value, unsigned long addr)
{
_outl(value, addr);
}
# 658 "./include/asm-generic/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
__insb((void *)(long)addr, buffer, count);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
__insw((void *)(long)addr, buffer, count);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
__insl((void *)(long)addr, buffer, count);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void outsb_p(unsigned long addr, const void *buffer,
unsigned int count)
{
__outsb((void *)(long)addr, buffer, count);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void outsw_p(unsigned long addr, const void *buffer,
unsigned int count)
{
__outsw((void *)(long)addr, buffer, count);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void outsl_p(unsigned long addr, const void *buffer,
unsigned int count)
{
__outsl((void *)(long)addr, buffer, count);
}
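/* The ioread*() / iowrite*() family below expands to fence + raw
 * access inside a statement expression. The *be variants look
 * enormous because the __builtin_constant_p ladder is the expansion
 * of swab16/32/64(): a constant-foldable byte swap with a __fswab*()
 * call as the runtime fallback. */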





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u8 ioread8(const volatile void *addr)
{
return ({ u8 __v; do {} while (0); __v = ({ u8 __r = __raw_readb(addr); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; });
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u16 ioread16(const volatile void *addr)
{
return ({ u16 __v; do {} while (0); __v = ({ u16 __r = (( __u16)(__le16)(( __le16)__raw_readw(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; });
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 ioread32(const volatile void *addr)
{
return ({ u32 __v; do {} while (0); __v = ({ u32 __r = (( __u32)(__le32)(( __le32)__raw_readl(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; });
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 ioread64(const volatile void *addr)
{
return ({ u64 __v; do {} while (0); __v = ({ u64 __r = (( __u64)(__le64)(( __le64)__raw_readq(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; });
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void iowrite8(u8 value, volatile void *addr)
{
({ __asm__ __volatile__ ("fence w,o" : : : "memory"); ((void)__raw_writeb(((value)), ((addr)))); mmiowb_set_pending(); });
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void iowrite16(u16 value, volatile void *addr)
{
({ __asm__ __volatile__ ("fence w,o" : : : "memory"); ((void)__raw_writew(( u16)(( __le16)(__u16)((value))), ((addr)))); mmiowb_set_pending(); });
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void iowrite32(u32 value, volatile void *addr)
{
({ __asm__ __volatile__ ("fence w,o" : : : "memory"); ((void)__raw_writel(( u32)(( __le32)(__u32)((value))), ((addr)))); mmiowb_set_pending(); });
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void iowrite64(u64 value, volatile void *addr)
{
({ __asm__ __volatile__ ("fence w,o" : : : "memory"); ((void)__raw_writeq(( u64)(( __le64)(__u64)((value))), ((addr)))); mmiowb_set_pending(); });
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u16 ioread16be(const volatile void *addr)
{
return (__builtin_constant_p((__u16)(({ u16 __v; do {} while (0); __v = ({ u16 __r = (( __u16)(__le16)(( __le16)__raw_readw(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; }))) ? ((__u16)( (((__u16)(({ u16 __v; do {} while (0); __v = ({ u16 __r = (( __u16)(__le16)(( __le16)__raw_readw(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; })) & (__u16)0x00ffU) << 8) | (((__u16)(({ u16 __v; do {} while (0); __v = ({ u16 __r = (( __u16)(__le16)(( __le16)__raw_readw(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; })) & (__u16)0xff00U) >> 8))) : __fswab16(({ u16 __v; do {} while (0); __v = ({ u16 __r = (( __u16)(__le16)(( __le16)__raw_readw(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; })));
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 ioread32be(const volatile void *addr)
{
return (__builtin_constant_p((__u32)(({ u32 __v; do {} while (0); __v = ({ u32 __r = (( __u32)(__le32)(( __le32)__raw_readl(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; }))) ? ((__u32)( (((__u32)(({ u32 __v; do {} while (0); __v = ({ u32 __r = (( __u32)(__le32)(( __le32)__raw_readl(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; })) & (__u32)0x000000ffUL) << 24) | (((__u32)(({ u32 __v; do {} while (0); __v = ({ u32 __r = (( __u32)(__le32)(( __le32)__raw_readl(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; })) & (__u32)0x0000ff00UL) << 8) | (((__u32)(({ u32 __v; do {} while (0); __v = ({ u32 __r = (( __u32)(__le32)(( __le32)__raw_readl(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; })) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(({ u32 __v; do {} while (0); __v = ({ u32 __r = (( __u32)(__le32)(( __le32)__raw_readl(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; })) & (__u32)0xff000000UL) >> 24))) : __fswab32(({ u32 __v; do {} while (0); __v = ({ u32 __r = (( __u32)(__le32)(( __le32)__raw_readl(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; })));
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 ioread64be(const volatile void *addr)
{
return (__builtin_constant_p((__u64)(({ u64 __v; do {} while (0); __v = ({ u64 __r = (( __u64)(__le64)(( __le64)__raw_readq(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; }))) ? ((__u64)( (((__u64)(({ u64 __v; do {} while (0); __v = ({ u64 __r = (( __u64)(__le64)(( __le64)__raw_readq(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; })) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(({ u64 __v; do {} while (0); __v = ({ u64 __r = (( __u64)(__le64)(( __le64)__raw_readq(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; })) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(({ u64 __v; do {} while (0); __v = ({ u64 __r = (( __u64)(__le64)(( __le64)__raw_readq(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; })) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(({ u64 __v; do {} while (0); __v = ({ u64 __r = (( __u64)(__le64)(( __le64)__raw_readq(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; })) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(({ u64 __v; do {} while (0); __v = ({ u64 __r = (( __u64)(__le64)(( __le64)__raw_readq(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; })) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(({ u64 __v; do {} while (0); __v = ({ u64 __r = (( __u64)(__le64)(( __le64)__raw_readq(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; })) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(({ u64 __v; do {} while (0); __v = ({ u64 __r = (( __u64)(__le64)(( __le64)__raw_readq(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; })) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(({ u64 __v; do {} while (0); __v = ({ u64 __r = (( __u64)(__le64)(( __le64)__raw_readq(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; })) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(({ u64 __v; do {} while (0); __v = ({ u64 __r = (( __u64)(__le64)(( __le64)__raw_readq(addr))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; })));
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void iowrite16be(u16 value, void volatile *addr)
{
({ __asm__ __volatile__ ("fence w,o" : : : "memory"); ((void)__raw_writew(( u16)(( __le16)(__u16)(((__builtin_constant_p((__u16)(value)) ? ((__u16)( (((__u16)(value) & (__u16)0x00ffU) << 8) | (((__u16)(value) & (__u16)0xff00U) >> 8))) : __fswab16(value))))), ((addr)))); mmiowb_set_pending(); });
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void iowrite32be(u32 value, volatile void *addr)
{
({ __asm__ __volatile__ ("fence w,o" : : : "memory"); ((void)__raw_writel(( u32)(( __le32)(__u32)(((__builtin_constant_p((__u32)(value)) ? ((__u32)( (((__u32)(value) & (__u32)0x000000ffUL) << 24) | (((__u32)(value) & (__u32)0x0000ff00UL) << 8) | (((__u32)(value) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(value) & (__u32)0xff000000UL) >> 24))) : __fswab32(value))))), ((addr)))); mmiowb_set_pending(); });
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void iowrite64be(u64 value, volatile void *addr)
{
({ __asm__ __volatile__ ("fence w,o" : : : "memory"); ((void)__raw_writeq(( u64)(( __le64)(__u64)(((__builtin_constant_p((__u64)(value)) ? ((__u64)( (((__u64)(value) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(value) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(value) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(value) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(value) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(value) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(value) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(value) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(value))))), ((addr)))); mmiowb_set_pending(); });
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ioread8_rep(const volatile void *addr, void *buffer,
unsigned int count)
{
__readsb(addr, buffer, count);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ioread16_rep(const volatile void *addr,
void *buffer, unsigned int count)
{
__readsw(addr, buffer, count);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ioread32_rep(const volatile void *addr,
void *buffer, unsigned int count)
{
__readsl(addr, buffer, count);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ioread64_rep(const volatile void *addr,
void *buffer, unsigned int count)
{
__readsq(addr, buffer, count);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void iowrite8_rep(volatile void *addr,
const void *buffer,
unsigned int count)
{
__writesb(addr, buffer, count);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void iowrite16_rep(volatile void *addr,
const void *buffer,
unsigned int count)
{
__writesw(addr, buffer, count);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void iowrite32_rep(volatile void *addr,
const void *buffer,
unsigned int count)
{
__writesl(addr, buffer, count);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void iowrite64_rep(volatile void *addr,
const void *buffer,
unsigned int count)
{
__writesq(addr, buffer, count);
}







# 1 "./include/linux/vmalloc.h" 1
# 13 "./include/linux/vmalloc.h"
# 1 "./arch/riscv/include/asm/vmalloc.h" 1
# 14 "./include/linux/vmalloc.h" 2

struct vm_area_struct;
struct notifier_block;
# 48 "./include/linux/vmalloc.h"
struct vm_struct {
struct vm_struct *next;
void *addr;
unsigned long size;
unsigned long flags;
struct page **pages;



unsigned int nr_pages;
phys_addr_t phys_addr;
const void *caller;
};

struct vmap_area {
unsigned long va_start;
unsigned long va_end;

struct rb_node rb_node;
struct list_head list;







union {
unsigned long subtree_max_size;
struct vm_struct *vm;
};
};



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool arch_vmap_p4d_supported(pgprot_t prot)
{
return false;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool arch_vmap_pud_supported(pgprot_t prot)
{
return false;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool arch_vmap_pmd_supported(pgprot_t prot)
{
return false;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
u64 pfn, unsigned int max_page_shift)
{
return ((1UL) << (12));
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int arch_vmap_pte_supported_shift(unsigned long size)
{
return (12);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
return prot;
}
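/*
 * riscv stubs: huge-page vmap is not supported at any level (the
 * p4d/pud/pmd predicates all return false), so vmap PTE ranges are
 * mapped at the base page size; (1UL) << 12 is the expanded PAGE_SIZE
 * and 12 the expanded PAGE_SHIFT.
 */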





extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);


extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) vmalloc_init(void);
extern unsigned long vmalloc_nr_pages(void);







extern void *vmalloc(unsigned long size) __attribute__((__alloc_size__(1))) __attribute__((__malloc__));
extern void *vzalloc(unsigned long size) __attribute__((__alloc_size__(1))) __attribute__((__malloc__));
extern void *vmalloc_user(unsigned long size) __attribute__((__alloc_size__(1))) __attribute__((__malloc__));
extern void *vmalloc_node(unsigned long size, int node) __attribute__((__alloc_size__(1))) __attribute__((__malloc__));
extern void *vzalloc_node(unsigned long size, int node) __attribute__((__alloc_size__(1))) __attribute__((__malloc__));
extern void *vmalloc_32(unsigned long size) __attribute__((__alloc_size__(1))) __attribute__((__malloc__));
extern void *vmalloc_32_user(unsigned long size) __attribute__((__alloc_size__(1))) __attribute__((__malloc__));
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __attribute__((__alloc_size__(1))) __attribute__((__malloc__));
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
pgprot_t prot, unsigned long vm_flags, int node,
const void *caller) __attribute__((__alloc_size__(1))) __attribute__((__malloc__));
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
int node, const void *caller) __attribute__((__alloc_size__(1))) __attribute__((__malloc__));
void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __attribute__((__alloc_size__(1))) __attribute__((__malloc__));

extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __attribute__((__alloc_size__(1, 2))) __attribute__((__malloc__));
extern void *vmalloc_array(size_t n, size_t size) __attribute__((__alloc_size__(1, 2))) __attribute__((__malloc__));
extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __attribute__((__alloc_size__(1, 2))) __attribute__((__malloc__));
extern void *vcalloc(size_t n, size_t size) __attribute__((__alloc_size__(1, 2))) __attribute__((__malloc__));

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
unsigned long uaddr, void *kaddr,
unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long pgoff);
# 192 "./include/linux/vmalloc.h"
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) size_t get_vm_area_size(const struct vm_struct *area)
{
if (!(area->flags & 0x00000040))

return area->size - ((1UL) << (12));
else
return area->size;

}
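/*
 * The 0x00000040 test corresponds to the pre-expansion VM_NO_GUARD
 * flag: unless it is set, a vmalloc area carries one trailing guard
 * page, so the usable size is area->size minus PAGE_SIZE.
 */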

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
unsigned long flags,
unsigned long start, unsigned long end,
const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_vm_area_hugepages(const void *addr)
{
# 231 "./include/linux/vmalloc.h"
return false;

}


void vunmap_range(unsigned long addr, unsigned long end);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_vm_flush_reset_perms(void *addr)
{
struct vm_struct *vm = find_vm_area(addr);

if (vm)
vm->flags |= 0x00000100;
}
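/* 0x00000100 above corresponds to the pre-expansion VM_FLUSH_RESET_PERMS flag. */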
# 252 "./include/linux/vmalloc.h"
extern long vread(char *buf, char *addr, unsigned long count);




extern struct list_head vmap_area_list;
extern __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) void vm_area_add_early(struct vm_struct *vm);
extern __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) void vm_area_register_early(struct vm_struct *vm, size_t align);



struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
const size_t *sizes, int nr_vms,
size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# 290 "./include/linux/vmalloc.h"
int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);


bool vmalloc_dump_obj(void *object);
# 912 "./include/asm-generic/io.h" 2








static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long virt_to_phys(volatile void *address)
{
return ({ unsigned long _x = (unsigned long)((unsigned long)address); ((_x) >= kernel_map.page_offset && (!1 || (_x) < kernel_map.page_offset + (((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2))) ? ((unsigned long)(_x) - kernel_map.va_pa_offset) : ({ unsigned long _y = _x; (0 && _y < kernel_map.virt_addr + 0) ? ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - 0); }); });
}
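/*
 * virt_to_phys() above is the expanded riscv __pa(): linear-map
 * addresses subtract va_pa_offset, while kernel-image addresses fall
 * through to the va_kernel_pa_offset branch. phys_to_virt() below is
 * the inverse __va(), a plain add of va_pa_offset.
 */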




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *phys_to_virt(unsigned long address)
{
return ((void *)((void *)((unsigned long)((phys_addr_t)(address)) + kernel_map.va_pa_offset)));
}
# 967 "./include/asm-generic/io.h"
void *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
void iounmap(volatile void *addr);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *ioremap(phys_addr_t addr, size_t size)
{

return ioremap_prot(addr, size, ((1 << 1) | (1 << 2) | (1 << 0) | (1 << 6) | (1 << 7) | (1 << 5)));
}
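/*
 * The third argument is the expanded page-protection constant: the OR
 * of bits 0-2 and 5-7 matches the riscv _PAGE_VALID/_PAGE_READ/
 * _PAGE_WRITE/_PAGE_GLOBAL/_PAGE_ACCESSED/_PAGE_DIRTY bits, i.e. a
 * non-executable kernel mapping for the ioremap()ed range.
 */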
# 994 "./include/asm-generic/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *ioremap_uc(phys_addr_t offset, size_t size)
{
return ((void *)0);
}
# 1011 "./include/asm-generic/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *ioremap_np(phys_addr_t offset, size_t size)
{
return ((void *)0);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *ioport_map(unsigned long port, unsigned int nr)
{
port &= (0x01000000 - 1);
return (port > ((0x01000000 - 1) - 0)) ? ((void *)0) : ((void *)(((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) )))))) - 0x01000000)) + port;
}
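/*
 * Port-I/O emulation: the port number is masked to a 16 MiB window
 * (0x01000000 appears to be the expanded PCI_IO_SIZE), and the long
 * expression computes the virtual base of that window below the
 * vmemmap region from the page-table geometry.
 */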





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ioport_unmap(void *p)
{
}
# 1049 "./include/asm-generic/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *xlate_dev_mem_ptr(phys_addr_t addr)
{
return ((void *)((void *)((unsigned long)((phys_addr_t)(addr)) + kernel_map.va_pa_offset)));
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
# 1086 "./include/asm-generic/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memset_io(volatile void *addr, int value,
size_t size)
{
memset(((void *)(addr)), value, size);
}
# 1103 "./include/asm-generic/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memcpy_fromio(void *buffer,
const volatile void *addr,
size_t size)
{
memcpy(buffer, ((void *)(addr)), size);
}
# 1121 "./include/asm-generic/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memcpy_toio(volatile void *addr, const void *buffer,
size_t size)
{
memcpy(((void *)(addr)), buffer, size);
}



extern int devmem_is_allowed(unsigned long pfn);
# 137 "./arch/riscv/include/asm/io.h" 2
# 14 "./include/linux/io.h" 2


struct device;
struct resource;

void __iowrite32_copy(void *to, const void *from, size_t count);
void __ioread32_copy(void *to, const void *from, size_t count);
void __iowrite64_copy(void *to, const void *from, size_t count);


int ioremap_page_range(unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot);
# 38 "./include/linux/io.h"
void * devm_ioport_map(struct device *dev, unsigned long port,
unsigned int nr);
void devm_ioport_unmap(struct device *dev, void *addr);
# 56 "./include/linux/io.h"
void *devm_ioremap(struct device *dev, resource_size_t offset,
resource_size_t size);
void *devm_ioremap_uc(struct device *dev, resource_size_t offset,
resource_size_t size);
void *devm_ioremap_wc(struct device *dev, resource_size_t offset,
resource_size_t size);
void *devm_ioremap_np(struct device *dev, resource_size_t offset,
resource_size_t size);
void devm_iounmap(struct device *dev, void *addr);
int check_signature(const volatile void *io_addr,
const unsigned char *signature, int length);
void devm_ioremap_release(struct device *dev, void *res);

void *devm_memremap(struct device *dev, resource_size_t offset,
size_t size, unsigned long flags);
void devm_memunmap(struct device *dev, void *addr);
# 86 "./include/linux/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *pci_remap_cfgspace(phys_addr_t offset,
size_t size)
{
return ioremap_np(offset, size) ?: ioremap(offset, size);
}
# 115 "./include/linux/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) arch_phys_wc_add(unsigned long base,
unsigned long size)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_phys_wc_del(int handle)
{
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int arch_phys_wc_index(int handle)
{
return -1;
}




int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size);

enum {

MEMREMAP_WB = 1 << 0,
MEMREMAP_WT = 1 << 1,
MEMREMAP_WC = 1 << 2,
MEMREMAP_ENC = 1 << 3,
MEMREMAP_DEC = 1 << 4,
};

void *memremap(resource_size_t offset, size_t size, unsigned long flags);
void memunmap(void *addr);
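/*
 * Illustrative sketch only, not part of the original translation
 * unit: a minimal use of the memremap() interface declared above,
 * requesting an ordinary cacheable (write-back) mapping. The function
 * name is hypothetical; the attribute spelling mirrors this file's
 * expanded 'static inline'.
 */
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *example_memremap_wb(resource_size_t offset, size_t size)
{
return memremap(offset, size, MEMREMAP_WB);
}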
# 159 "./include/linux/io.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int arch_io_reserve_memtype_wc(resource_size_t base,
resource_size_t size)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_io_free_memtype_wc(resource_size_t base,
resource_size_t size)
{
}


int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
resource_size_t size);
# 21 "./include/linux/irq.h" 2


# 1 "./arch/riscv/include/asm/irq.h" 1
# 10 "./arch/riscv/include/asm/irq.h"
# 1 "./include/linux/interrupt.h" 1
# 11 "./include/linux/interrupt.h"
# 1 "./include/linux/hardirq.h" 1
# 12 "./include/linux/interrupt.h" 2








# 1 "./arch/riscv/include/asm/irq.h" 1
# 21 "./include/linux/interrupt.h" 2
# 1 "./arch/riscv/include/asm/sections.h" 1







# 1 "./include/asm-generic/sections.h" 1
# 35 "./include/asm-generic/sections.h"
extern char _text[], _stext[], _etext[];
extern char _data[], _sdata[], _edata[];
extern char __bss_start[], __bss_stop[];
extern char __init_begin[], __init_end[];
extern char _sinittext[], _einittext[];
extern char __start_ro_after_init[], __end_ro_after_init[];
extern char _end[];
extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
extern char __kprobes_text_start[], __kprobes_text_end[];
extern char __entry_text_start[], __entry_text_end[];
extern char __start_rodata[], __end_rodata[];
extern char __irqentry_text_start[], __irqentry_text_end[];
extern char __softirqentry_text_start[], __softirqentry_text_end[];
extern char __start_once[], __end_once[];


extern char __ctors_start[], __ctors_end[];


extern char __start_opd[], __end_opd[];


extern char __noinstr_text_start[], __noinstr_text_end[];

extern const void __nosave_begin, __nosave_end;
# 70 "./include/asm-generic/sections.h"
typedef struct {
unsigned long addr;
} func_desc_t;


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool have_function_descriptors(void)
{
return 0;
}
# 91 "./include/asm-generic/sections.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool memory_contains(void *begin, void *end, void *virt,
size_t size)
{
return virt >= begin && virt + size <= end;
}
# 108 "./include/asm-generic/sections.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool memory_intersects(void *begin, void *end, void *virt,
size_t size)
{
void *vend = virt + size;

return (virt >= begin && virt < end) || (vend >= begin && vend < end);
}
# 125 "./include/asm-generic/sections.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool init_section_contains(void *virt, size_t size)
{
return memory_contains(__init_begin, __init_end, virt, size);
}
# 139 "./include/asm-generic/sections.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool init_section_intersects(void *virt, size_t size)
{
return memory_intersects(__init_begin, __init_end, virt, size);
}
# 154 "./include/asm-generic/sections.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_kernel_core_data(unsigned long addr)
{
if (addr >= (unsigned long)_sdata && addr < (unsigned long)_edata)
return true;

if (addr >= (unsigned long)__bss_start &&
addr < (unsigned long)__bss_stop)
return true;

return false;
}
# 174 "./include/asm-generic/sections.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_kernel_rodata(unsigned long addr)
{
return addr >= (unsigned long)__start_rodata &&
addr < (unsigned long)__end_rodata;
}
# 188 "./include/asm-generic/sections.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_kernel_inittext(unsigned long addr)
{
return addr >= (unsigned long)_sinittext &&
addr < (unsigned long)_einittext;
}
# 203 "./include/asm-generic/sections.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __is_kernel_text(unsigned long addr)
{
return addr >= (unsigned long)_stext &&
addr < (unsigned long)_etext;
}
# 219 "./include/asm-generic/sections.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __is_kernel(unsigned long addr)
{
return ((addr >= (unsigned long)_stext &&
addr < (unsigned long)_end) ||
(addr >= (unsigned long)__init_begin &&
addr < (unsigned long)__init_end));
}
# 9 "./arch/riscv/include/asm/sections.h" 2


extern char _start[];
extern char _start_kernel[];
extern char __init_data_begin[], __init_data_end[];
extern char __init_text_begin[], __init_text_end[];
extern char __alt_start[], __alt_end[];

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_va_kernel_text(uintptr_t va)
{
uintptr_t start = (uintptr_t)_start;
uintptr_t end = (uintptr_t)__init_data_begin;

return va >= start && va < end;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_va_kernel_lm_alias_text(uintptr_t va)
{
uintptr_t start = (uintptr_t)((void *)((void *)((unsigned long)((phys_addr_t)(({ unsigned long _x = ({ unsigned long __ptr; __ptr = (unsigned long) ((unsigned long)(_start)); (typeof((unsigned long)(_start))) (__ptr + (0)); }); ((_x) >= kernel_map.page_offset && (!1 || (_x) < kernel_map.page_offset + (((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2))) ? ((unsigned long)(_x) - kernel_map.va_pa_offset) : ({ unsigned long _y = _x; (0 && _y < kernel_map.virt_addr + 0) ? ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - 0); }); }))) + kernel_map.va_pa_offset)));
uintptr_t end = (uintptr_t)((void *)((void *)((unsigned long)((phys_addr_t)(({ unsigned long _x = ({ unsigned long __ptr; __ptr = (unsigned long) ((unsigned long)(__init_data_begin)); (typeof((unsigned long)(__init_data_begin))) (__ptr + (0)); }); ((_x) >= kernel_map.page_offset && (!1 || (_x) < kernel_map.page_offset + (((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2))) ? ((unsigned long)(_x) - kernel_map.va_pa_offset) : ({ unsigned long _y = _x; (0 && _y < kernel_map.virt_addr + 0) ? ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - 0); }); }))) + kernel_map.va_pa_offset)));

return va >= start && va < end;
}
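/*
 * The two giant initializers above are the expanded lm_alias(): each
 * takes a kernel-image symbol through the __pa()-style translation and
 * back through the linear-map __va(), yielding the symbol's alias
 * address in the linear mapping.
 */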
# 22 "./include/linux/interrupt.h" 2
# 95 "./include/linux/interrupt.h"
enum {
IRQC_IS_HARDIRQ = 0,
IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);
# 118 "./include/linux/interrupt.h"
struct irqaction {
irq_handler_t handler;
void *dev_id;
void *percpu_dev_id;
struct irqaction *next;
irq_handler_t thread_fn;
struct task_struct *thread;
struct irqaction *secondary;
unsigned int irq;
unsigned int flags;
unsigned long thread_flags;
unsigned long thread_mask;
const char *name;
struct proc_dir_entry *dir;
} __attribute__((__aligned__(1 << (6))));

extern irqreturn_t no_action(int cpl, void *dev_id);
# 146 "./include/linux/interrupt.h"
extern int __attribute__((__warn_unused_result__))
request_threaded_irq(unsigned int irq, irq_handler_t handler,
irq_handler_t thread_fn,
unsigned long flags, const char *name, void *dev);
# 164 "./include/linux/interrupt.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__))
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
const char *name, void *dev)
{
return request_threaded_irq(irq, handler, ((void *)0), flags, name, dev);
}
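/*
 * Illustrative sketch only, not part of the original translation
 * unit: a minimal handler registered through the request_irq()
 * wrapper above. The handler and function names, the "example" label
 * and the flags value (0, i.e. no IRQF_* modifiers) are hypothetical.
 */
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) irqreturn_t example_irq_handler(int irq, void *dev_id)
{
(void)irq;
(void)dev_id;
return IRQ_HANDLED;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) example_request_irq(unsigned int irq, void *dev)
{
return request_irq(irq, example_irq_handler, 0, "example", dev);
}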

extern int __attribute__((__warn_unused_result__))
request_any_context_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *name, void *dev_id);

extern int __attribute__((__warn_unused_result__))
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *devname,
void *percpu_dev_id);

extern int __attribute__((__warn_unused_result__))
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
const char *name, void *dev);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__))
request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void *percpu_dev_id)
{
return __request_percpu_irq(irq, handler, 0,
devname, percpu_dev_id);
}

extern int __attribute__((__warn_unused_result__))
request_percpu_nmi(unsigned int irq, irq_handler_t handler,
const char *devname, void *dev);

extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void *);

extern const void *free_nmi(unsigned int irq, void *dev_id);
extern void free_percpu_nmi(unsigned int irq, void *percpu_dev_id);

struct device;

extern int __attribute__((__warn_unused_result__))
devm_request_threaded_irq(struct device *dev, unsigned int irq,
irq_handler_t handler, irq_handler_t thread_fn,
unsigned long irqflags, const char *devname,
void *dev_id);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__))
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
unsigned long irqflags, const char *devname, void *dev_id)
{
return devm_request_threaded_irq(dev, irq, handler, ((void *)0), irqflags,
devname, dev_id);
}

extern int __attribute__((__warn_unused_result__))
devm_request_any_context_irq(struct device *dev, unsigned int irq,
irq_handler_t handler, unsigned long irqflags,
const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
# 243 "./include/linux/interrupt.h"
bool irq_has_action(unsigned int irq);
extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

extern void disable_nmi_nosync(unsigned int irq);
extern void disable_percpu_nmi(unsigned int irq);
extern void enable_nmi(unsigned int irq);
extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq);

extern int irq_inject_interrupt(unsigned int irq);


extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
extern void rearm_wake_irq(unsigned int irq);
# 279 "./include/linux/interrupt.h"
struct irq_affinity_notify {
unsigned int irq;
struct kref kref;
struct work_struct work;
void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
void (*release)(struct kref *ref);
};
# 303 "./include/linux/interrupt.h"
struct irq_affinity {
unsigned int pre_vectors;
unsigned int post_vectors;
unsigned int nr_sets;
unsigned int set_size[4];
void (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
void *priv;
};






struct irq_affinity_desc {
struct cpumask mask;
unsigned int is_managed : 1;
};



extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
bool setaffinity);
# 342 "./include/linux/interrupt.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
{
return __irq_apply_affinity_hint(irq, m, false);
}
# 357 "./include/linux/interrupt.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
{
return __irq_apply_affinity_hint(irq, m, true);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
return irq_set_affinity_and_hint(irq, m);
}

extern int irq_update_affinity_desc(unsigned int irq,
struct irq_affinity_desc *affinity);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);

unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
const struct irq_affinity *affd);
# 459 "./include/linux/interrupt.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void disable_irq_nosync_lockdep(unsigned int irq)
{
disable_irq_nosync(irq);

do { bool was_disabled = (arch_irqs_disabled()); arch_local_irq_disable(); if (!was_disabled) trace_hardirqs_off(); } while (0);

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
disable_irq_nosync(irq);

do { do { ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); *flags = arch_local_irq_save(); } while (0); if (!({ ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(*flags); })) trace_hardirqs_off(); } while (0);

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void disable_irq_lockdep(unsigned int irq)
{
disable_irq(irq);

do { bool was_disabled = (arch_irqs_disabled()); arch_local_irq_disable(); if (!was_disabled) trace_hardirqs_off(); } while (0);

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void enable_irq_lockdep(unsigned int irq)
{

do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0);

enable_irq(irq);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{

do { if (!({ ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(*flags); })) trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(*flags); } while (0); } while (0);

enable_irq(irq);
}


extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int enable_irq_wake(unsigned int irq)
{
return irq_set_irq_wake(irq, 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int disable_irq_wake(unsigned int irq)
{
return irq_set_irq_wake(irq, 0);
}




enum irqchip_irq_state {
IRQCHIP_STATE_PENDING,
IRQCHIP_STATE_ACTIVE,
IRQCHIP_STATE_MASKED,
IRQCHIP_STATE_LINE_LEVEL,
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
bool state);





extern struct static_key_false force_irqthreads_key;
# 566 "./include/linux/interrupt.h"
enum
{
HI_SOFTIRQ=0,
TIMER_SOFTIRQ,
NET_TX_SOFTIRQ,
NET_RX_SOFTIRQ,
BLOCK_SOFTIRQ,
IRQ_POLL_SOFTIRQ,
TASKLET_SOFTIRQ,
SCHED_SOFTIRQ,
HRTIMER_SOFTIRQ,
RCU_SOFTIRQ,

NR_SOFTIRQS
};
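/*
 * The softirq vectors above are listed in priority order: HI_SOFTIRQ
 * is serviced first, and RCU_SOFTIRQ is deliberately kept last.
 */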
# 596 "./include/linux/interrupt.h"
extern const char * const softirq_to_name[NR_SOFTIRQS];





struct softirq_action
{
void (*action)(struct softirq_action *);
};

void do_softirq(void);
void __do_softirq(void);

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

extern __attribute__((section(".data..percpu" ""))) __typeof__(struct task_struct *) ksoftirqd;
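/*
 * The switch-on-sizeof statement expression below is the expanded
 * this_cpu_read(ksoftirqd): it adds this CPU's __per_cpu_offset entry
 * to the per-cpu variable's address and dereferences it, with a
 * matched preempt-count increment/decrement (or IRQ save/restore on
 * the fallback path) guarding the access.
 */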

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct task_struct *this_cpu_ksoftirqd(void)
{
return ({ typeof(ksoftirqd) pscr_ret__; do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(ksoftirqd)) { case 1: pscr_ret__ = ({ typeof(ksoftirqd) __ret; if ((sizeof(ksoftirqd) == sizeof(char) || sizeof(ksoftirqd) == sizeof(short) || sizeof(ksoftirqd) == sizeof(int) || sizeof(ksoftirqd) == sizeof(long))) __ret = ({ typeof(ksoftirqd) ___ret; do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); ___ret = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_222(void) ; if (!((sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(char) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(short) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(int) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(long)) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(long long))) __compiletime_assert_222(); } while (0); (*(const volatile typeof( _Generic((*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); 
}); })))) *)&(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }))); }); do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); ___ret; }); else __ret = ({ typeof(ksoftirqd) ___ret; unsigned long ___flags; do { ({ unsigned long __dummy; typeof(___flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); ___flags = arch_local_irq_save(); } while (0); ___ret = ({ *({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); do { ({ unsigned long __dummy; typeof(___flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(___flags); } while (0); ___ret; }); __ret; }); break; case 2: pscr_ret__ = ({ typeof(ksoftirqd) __ret; if ((sizeof(ksoftirqd) == sizeof(char) || sizeof(ksoftirqd) == sizeof(short) || sizeof(ksoftirqd) == sizeof(int) || sizeof(ksoftirqd) == sizeof(long))) __ret = ({ typeof(ksoftirqd) ___ret; do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); ___ret = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_223(void) ; if (!((sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(char) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(short) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(int) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(long)) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(long long))) __compiletime_assert_223(); } while (0); (*(const volatile typeof( 
_Generic((*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })))) *)&(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }))); }); do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); ___ret; }); else __ret = ({ typeof(ksoftirqd) ___ret; unsigned long ___flags; do { ({ unsigned long __dummy; typeof(___flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); ___flags = arch_local_irq_save(); } while (0); ___ret = ({ *({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); do { ({ unsigned long __dummy; typeof(___flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(___flags); } while (0); ___ret; }); __ret; }); break; case 4: pscr_ret__ = ({ typeof(ksoftirqd) __ret; if ((sizeof(ksoftirqd) == sizeof(char) || sizeof(ksoftirqd) == sizeof(short) || sizeof(ksoftirqd) == sizeof(int) || sizeof(ksoftirqd) == sizeof(long))) __ret = ({ typeof(ksoftirqd) ___ret; do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); ___ret = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_224(void) ; if (!((sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(char) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(short) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long 
__ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(int) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(long)) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(long long))) __compiletime_assert_224(); } while (0); (*(const volatile typeof( _Generic((*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })))) *)&(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }))); }); do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); ___ret; }); else __ret = ({ typeof(ksoftirqd) ___ret; unsigned long ___flags; do { ({ unsigned long __dummy; typeof(___flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); ___flags = arch_local_irq_save(); } while (0); ___ret = ({ *({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); do { ({ unsigned long __dummy; typeof(___flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(___flags); } while (0); ___ret; }); __ret; }); break; case 8: pscr_ret__ = ({ typeof(ksoftirqd) __ret; if ((sizeof(ksoftirqd) == sizeof(char) || sizeof(ksoftirqd) == sizeof(short) || sizeof(ksoftirqd) == sizeof(int) || sizeof(ksoftirqd) == sizeof(long))) __ret = ({ typeof(ksoftirqd) ___ret; do { 
__preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); ___ret = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_225(void) ; if (!((sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(char) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(short) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(int) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(long)) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(long long))) __compiletime_assert_225(); } while (0); (*(const volatile typeof( _Generic((*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })))) *)&(*({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }))); }); do { __asm__ __volatile__("": : :"memory"); 
__preempt_count_sub(1); } while (0); ___ret; }); else __ret = ({ typeof(ksoftirqd) ___ret; unsigned long ___flags; do { ({ unsigned long __dummy; typeof(___flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); ___flags = arch_local_irq_save(); } while (0); ___ret = ({ *({ do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd))); (typeof((typeof(*(&(ksoftirqd))) *)(&(ksoftirqd)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); do { ({ unsigned long __dummy; typeof(___flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(___flags); } while (0); ___ret; }); __ret; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; });
}
# 647 "./include/linux/interrupt.h"
struct tasklet_struct
{
struct tasklet_struct *next;
unsigned long state;
atomic_t count;
bool use_callback;
union {
void (*func)(unsigned long data);
void (*callback)(struct tasklet_struct *t);
};
unsigned long data;
};
# 689 "./include/linux/interrupt.h"
enum
{
TASKLET_STATE_SCHED,
TASKLET_STATE_RUN
};
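/*
 * TASKLET_STATE_SCHED is set while a tasklet sits on a run list;
 * TASKLET_STATE_RUN serves as a lock bit for tasklet_trylock() below.
 * The schedule helpers only enqueue when the SCHED bit was previously
 * clear, so a tasklet can be queued at most once at a time.
 */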


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int tasklet_trylock(struct tasklet_struct *t)
{
return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

void tasklet_unlock(struct tasklet_struct *t);
void tasklet_unlock_wait(struct tasklet_struct *t);
void tasklet_unlock_spin_wait(struct tasklet_struct *t);
# 712 "./include/linux/interrupt.h"
extern void __tasklet_schedule(struct tasklet_struct *t);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tasklet_schedule(struct tasklet_struct *t)
{
if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tasklet_hi_schedule(struct tasklet_struct *t)
{
if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
__tasklet_hi_schedule(t);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tasklet_disable_nosync(struct tasklet_struct *t)
{
atomic_inc(&t->count);
do { do { } while (0); __asm__ __volatile__ ("fence " "rw" "," "rw" : : : "memory"); } while (0);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tasklet_disable_in_atomic(struct tasklet_struct *t)
{
tasklet_disable_nosync(t);
tasklet_unlock_spin_wait(t);
do { do { } while (0); __asm__ __volatile__ ("fence " "rw" "," "rw" : : : "memory"); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tasklet_disable(struct tasklet_struct *t)
{
tasklet_disable_nosync(t);
tasklet_unlock_wait(t);
do { do { } while (0); __asm__ __volatile__ ("fence " "rw" "," "rw" : : : "memory"); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tasklet_enable(struct tasklet_struct *t)
{
do { do { } while (0); __asm__ __volatile__ ("fence " "rw" "," "rw" : : : "memory"); } while (0);
atomic_dec(&t->count);
}
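/*
 * The "fence rw,rw" sequences in the enable/disable helpers above are
 * the riscv expansion of the smp_mb__{before,after}_atomic() barriers,
 * ordering the atomic count updates against the tasklet state checks.
 */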

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data);
extern void tasklet_setup(struct tasklet_struct *t,
void (*callback)(struct tasklet_struct *));
# 793 "./include/linux/interrupt.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long probe_irq_on(void)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int probe_irq_off(unsigned long val)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int probe_irq_mask(unsigned long val)
{
return 0;
}
# 813 "./include/linux/interrupt.h"
extern void init_irq_proc(void);
# 826 "./include/linux/interrupt.h"
struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
# 11 "./arch/riscv/include/asm/irq.h" 2


# 1 "./include/asm-generic/irq.h" 1
# 14 "./include/asm-generic/irq.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int irq_canonicalize(int irq)
{
return irq;
}
# 14 "./arch/riscv/include/asm/irq.h" 2

extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) init_IRQ(void);
# 24 "./include/linux/irq.h" 2

# 1 "./arch/riscv/include/generated/asm/irq_regs.h" 1
# 1 "./include/asm-generic/irq_regs.h" 1
# 17 "./include/asm-generic/irq_regs.h"
extern __attribute__((section(".data..percpu" ""))) __typeof__(struct pt_regs *) __irq_regs;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct pt_regs *get_irq_regs(void)
{
return ({ __this_cpu_preempt_check("read"); ({ typeof(__irq_regs) pscr_ret__; do { const void *__vpp_verify = (typeof((&(__irq_regs)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(__irq_regs)) { case 1: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(__irq_regs)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(__irq_regs))) *)(&(__irq_regs))); (typeof((typeof(*(&(__irq_regs))) *)(&(__irq_regs)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; case 2: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(__irq_regs)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(__irq_regs))) *)(&(__irq_regs))); (typeof((typeof(*(&(__irq_regs))) *)(&(__irq_regs)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; case 4: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(__irq_regs)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(__irq_regs))) *)(&(__irq_regs))); (typeof((typeof(*(&(__irq_regs))) *)(&(__irq_regs)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; case 8: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(__irq_regs)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(__irq_regs))) *)(&(__irq_regs))); (typeof((typeof(*(&(__irq_regs))) *)(&(__irq_regs)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; }); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
{
struct pt_regs *old_regs;

old_regs = ({ __this_cpu_preempt_check("read"); ({ typeof(__irq_regs) pscr_ret__; do { const void *__vpp_verify = (typeof((&(__irq_regs)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(__irq_regs)) { case 1: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(__irq_regs)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(__irq_regs))) *)(&(__irq_regs))); (typeof((typeof(*(&(__irq_regs))) *)(&(__irq_regs)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; case 2: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(__irq_regs)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(__irq_regs))) *)(&(__irq_regs))); (typeof((typeof(*(&(__irq_regs))) *)(&(__irq_regs)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; case 4: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(__irq_regs)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(__irq_regs))) *)(&(__irq_regs))); (typeof((typeof(*(&(__irq_regs))) *)(&(__irq_regs)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; case 8: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(__irq_regs)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(__irq_regs))) *)(&(__irq_regs))); (typeof((typeof(*(&(__irq_regs))) *)(&(__irq_regs)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; }); });
({ __this_cpu_preempt_check("write"); do { do { const void *__vpp_verify = (typeof((&(__irq_regs)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(__irq_regs)) { case 1: do { *({ do { const void *__vpp_verify = (typeof((&(__irq_regs)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(__irq_regs))) *)(&(__irq_regs))); (typeof((typeof(*(&(__irq_regs))) *)(&(__irq_regs)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) = new_regs; } while (0);break; case 2: do { *({ do { const void *__vpp_verify = (typeof((&(__irq_regs)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(__irq_regs))) *)(&(__irq_regs))); (typeof((typeof(*(&(__irq_regs))) *)(&(__irq_regs)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) = new_regs; } while (0);break; case 4: do { *({ do { const void *__vpp_verify = (typeof((&(__irq_regs)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(__irq_regs))) *)(&(__irq_regs))); (typeof((typeof(*(&(__irq_regs))) *)(&(__irq_regs)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) = new_regs; } while (0);break; case 8: do { *({ do { const void *__vpp_verify = (typeof((&(__irq_regs)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(__irq_regs))) *)(&(__irq_regs))); (typeof((typeof(*(&(__irq_regs))) *)(&(__irq_regs)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) = new_regs; } while (0);break; default: __bad_size_call_parameter();break; } } while (0); });
return old_regs;
}
# 2 "./arch/riscv/include/generated/asm/irq_regs.h" 2
# 26 "./include/linux/irq.h" 2

struct seq_file;
struct module;
struct msi_msg;
struct irq_affinity_desc;
enum irqchip_irq_state;
# 77 "./include/linux/irq.h"
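/*
 * IRQ line flags from <linux/irq.h>: the low nibble (IRQ_TYPE_*) encodes
 * the trigger sense, IRQ_TYPE_PROBE marks autoprobing, and IRQ_LEVEL and
 * above are internal per-line status bits.
 */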
enum {
IRQ_TYPE_NONE = 0x00000000,
IRQ_TYPE_EDGE_RISING = 0x00000001,
IRQ_TYPE_EDGE_FALLING = 0x00000002,
IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
IRQ_TYPE_LEVEL_HIGH = 0x00000004,
IRQ_TYPE_LEVEL_LOW = 0x00000008,
IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
IRQ_TYPE_SENSE_MASK = 0x0000000f,
IRQ_TYPE_DEFAULT = IRQ_TYPE_SENSE_MASK,

IRQ_TYPE_PROBE = 0x00000010,

IRQ_LEVEL = (1 << 8),
IRQ_PER_CPU = (1 << 9),
IRQ_NOPROBE = (1 << 10),
IRQ_NOREQUEST = (1 << 11),
IRQ_NOAUTOEN = (1 << 12),
IRQ_NO_BALANCING = (1 << 13),
IRQ_MOVE_PCNTXT = (1 << 14),
IRQ_NESTED_THREAD = (1 << 15),
IRQ_NOTHREAD = (1 << 16),
IRQ_PER_CPU_DEVID = (1 << 17),
IRQ_IS_POLLED = (1 << 18),
IRQ_DISABLE_UNLAZY = (1 << 19),
IRQ_HIDDEN = (1 << 20),
IRQ_NO_DEBUG = (1 << 21),
};
# 123 "./include/linux/irq.h"
enum {
IRQ_SET_MASK_OK = 0,
IRQ_SET_MASK_OK_NOCOPY,
IRQ_SET_MASK_OK_DONE,
};

struct msi_desc;
struct irq_domain;
# 147 "./include/linux/irq.h"
struct irq_common_data {
unsigned int state_use_accessors;



void *handler_data;
struct msi_desc *msi_desc;
cpumask_var_t affinity;






};
# 177 "./include/linux/irq.h"
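/*
 * struct irq_data: per-irqchip view of one interrupt. @irq is the Linux
 * number, @hwirq the controller-local number; @parent_data is compiled in
 * here because this config enables CONFIG_IRQ_DOMAIN_HIERARCHY.
 */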
struct irq_data {
u32 mask;
unsigned int irq;
unsigned long hwirq;
struct irq_common_data *common;
struct irq_chip *chip;
struct irq_domain *domain;

struct irq_data *parent_data;

void *chip_data;
};
# 225 "./include/linux/irq.h"
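/*
 * IRQD_* bits live in irq_common_data.state_use_accessors and are meant
 * to be touched only through the irqd_*() accessors that follow.
 */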
enum {
IRQD_TRIGGER_MASK = 0xf,
IRQD_SETAFFINITY_PENDING = (1 << 8),
IRQD_ACTIVATED = (1 << 9),
IRQD_NO_BALANCING = (1 << 10),
IRQD_PER_CPU = (1 << 11),
IRQD_AFFINITY_SET = (1 << 12),
IRQD_LEVEL = (1 << 13),
IRQD_WAKEUP_STATE = (1 << 14),
IRQD_MOVE_PCNTXT = (1 << 15),
IRQD_IRQ_DISABLED = (1 << 16),
IRQD_IRQ_MASKED = (1 << 17),
IRQD_IRQ_INPROGRESS = (1 << 18),
IRQD_WAKEUP_ARMED = (1 << 19),
IRQD_FORWARDED_TO_VCPU = (1 << 20),
IRQD_AFFINITY_MANAGED = (1 << 21),
IRQD_IRQ_STARTED = (1 << 22),
IRQD_MANAGED_SHUTDOWN = (1 << 23),
IRQD_SINGLE_TARGET = (1 << 24),
IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
IRQD_CAN_RESERVE = (1 << 26),
IRQD_MSI_NOMASK_QUIRK = (1 << 27),
IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28),
IRQD_AFFINITY_ON_ACTIVATE = (1 << 29),
IRQD_IRQ_ENABLED_ON_SUSPEND = (1 << 30),
};



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_is_setaffinity_pending(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_SETAFFINITY_PENDING;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_is_per_cpu(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_PER_CPU;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_can_balance(struct irq_data *d)
{
return !((((d)->common)->state_use_accessors) & (IRQD_PER_CPU | IRQD_NO_BALANCING));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_affinity_was_set(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_AFFINITY_SET;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irqd_mark_affinity_was_set(struct irq_data *d)
{
(((d)->common)->state_use_accessors) |= IRQD_AFFINITY_SET;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_trigger_type_was_set(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_DEFAULT_TRIGGER_SET;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 irqd_get_trigger_type(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_TRIGGER_MASK;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irqd_set_trigger_type(struct irq_data *d, u32 type)
{
(((d)->common)->state_use_accessors) &= ~IRQD_TRIGGER_MASK;
(((d)->common)->state_use_accessors) |= type & IRQD_TRIGGER_MASK;
(((d)->common)->state_use_accessors) |= IRQD_DEFAULT_TRIGGER_SET;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_is_level_type(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_LEVEL;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irqd_set_single_target(struct irq_data *d)
{
(((d)->common)->state_use_accessors) |= IRQD_SINGLE_TARGET;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_is_single_target(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_SINGLE_TARGET;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irqd_set_handle_enforce_irqctx(struct irq_data *d)
{
(((d)->common)->state_use_accessors) |= IRQD_HANDLE_ENFORCE_IRQCTX;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_is_handle_enforce_irqctx(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_HANDLE_ENFORCE_IRQCTX;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_is_enabled_on_suspend(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_IRQ_ENABLED_ON_SUSPEND;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_is_wakeup_set(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_WAKEUP_STATE;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_can_move_in_process_context(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_MOVE_PCNTXT;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_irq_disabled(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_IRQ_DISABLED;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_irq_masked(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_IRQ_MASKED;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_irq_inprogress(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_IRQ_INPROGRESS;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_is_wakeup_armed(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_WAKEUP_ARMED;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_is_forwarded_to_vcpu(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_FORWARDED_TO_VCPU;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irqd_set_forwarded_to_vcpu(struct irq_data *d)
{
(((d)->common)->state_use_accessors) |= IRQD_FORWARDED_TO_VCPU;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
{
(((d)->common)->state_use_accessors) &= ~IRQD_FORWARDED_TO_VCPU;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_affinity_is_managed(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_AFFINITY_MANAGED;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_is_activated(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_ACTIVATED;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irqd_set_activated(struct irq_data *d)
{
(((d)->common)->state_use_accessors) |= IRQD_ACTIVATED;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irqd_clr_activated(struct irq_data *d)
{
(((d)->common)->state_use_accessors) &= ~IRQD_ACTIVATED;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_is_started(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_IRQ_STARTED;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_is_managed_and_shutdown(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_MANAGED_SHUTDOWN;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irqd_set_can_reserve(struct irq_data *d)
{
(((d)->common)->state_use_accessors) |= IRQD_CAN_RESERVE;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irqd_clr_can_reserve(struct irq_data *d)
{
(((d)->common)->state_use_accessors) &= ~IRQD_CAN_RESERVE;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_can_reserve(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_CAN_RESERVE;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irqd_set_msi_nomask_quirk(struct irq_data *d)
{
(((d)->common)->state_use_accessors) |= IRQD_MSI_NOMASK_QUIRK;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irqd_clr_msi_nomask_quirk(struct irq_data *d)
{
(((d)->common)->state_use_accessors) &= ~IRQD_MSI_NOMASK_QUIRK;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_msi_nomask_quirk(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_MSI_NOMASK_QUIRK;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irqd_set_affinity_on_activate(struct irq_data *d)
{
(((d)->common)->state_use_accessors) |= IRQD_AFFINITY_ON_ACTIVATE;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irqd_affinity_on_activate(struct irq_data *d)
{
return (((d)->common)->state_use_accessors) & IRQD_AFFINITY_ON_ACTIVATE;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
return d->hwirq;
}
# 504 "./include/linux/irq.h"
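/*
 * struct irq_chip: the callback table an interrupt controller driver
 * fills in; the genirq core calls these to startup, mask/unmask, ack,
 * eoi and retarget individual lines.
 */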
struct irq_chip {
const char *name;
unsigned int (*irq_startup)(struct irq_data *data);
void (*irq_shutdown)(struct irq_data *data);
void (*irq_enable)(struct irq_data *data);
void (*irq_disable)(struct irq_data *data);

void (*irq_ack)(struct irq_data *data);
void (*irq_mask)(struct irq_data *data);
void (*irq_mask_ack)(struct irq_data *data);
void (*irq_unmask)(struct irq_data *data);
void (*irq_eoi)(struct irq_data *data);

int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
int (*irq_retrigger)(struct irq_data *data);
int (*irq_set_type)(struct irq_data *data, unsigned int flow_type);
int (*irq_set_wake)(struct irq_data *data, unsigned int on);

void (*irq_bus_lock)(struct irq_data *data);
void (*irq_bus_sync_unlock)(struct irq_data *data);





void (*irq_suspend)(struct irq_data *data);
void (*irq_resume)(struct irq_data *data);
void (*irq_pm_shutdown)(struct irq_data *data);

void (*irq_calc_mask)(struct irq_data *data);

void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
int (*irq_request_resources)(struct irq_data *data);
void (*irq_release_resources)(struct irq_data *data);

void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg);
void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);

int (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state);
int (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state);

int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);

void (*ipi_send_single)(struct irq_data *data, unsigned int cpu);
void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest);

int (*irq_nmi_setup)(struct irq_data *data);
void (*irq_nmi_teardown)(struct irq_data *data);

unsigned long flags;
};
# 573 "./include/linux/irq.h"
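/* IRQCHIP_* flags: behavioural quirks a driver may set in irq_chip.flags. */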
enum {
IRQCHIP_SET_TYPE_MASKED = (1 << 0),
IRQCHIP_EOI_IF_HANDLED = (1 << 1),
IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
IRQCHIP_SKIP_SET_WAKE = (1 << 4),
IRQCHIP_ONESHOT_SAFE = (1 << 5),
IRQCHIP_EOI_THREADED = (1 << 6),
IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7),
IRQCHIP_SUPPORTS_NMI = (1 << 8),
IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = (1 << 9),
IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10),
};
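/*
 * <linux/irqdesc.h> follows; it pulls in kobject.h -> sysfs.h -> kernfs.h
 * because struct irq_desc embeds a struct kobject for its sysfs entry.
 */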


# 1 "./include/linux/irqdesc.h" 1





# 1 "./include/linux/kobject.h" 1
# 20 "./include/linux/kobject.h"
# 1 "./include/linux/sysfs.h" 1
# 16 "./include/linux/sysfs.h"
# 1 "./include/linux/kernfs.h" 1
# 22 "./include/linux/kernfs.h"
struct file;
struct dentry;
struct iattr;
struct seq_file;
struct vm_area_struct;
struct vm_operations_struct;
struct super_block;
struct file_system_type;
struct poll_table_struct;
struct fs_context;

struct kernfs_fs_context;
struct kernfs_open_node;
struct kernfs_iattrs;

enum kernfs_node_type {
KERNFS_DIR = 0x0001,
KERNFS_FILE = 0x0002,
KERNFS_LINK = 0x0004,
};






enum kernfs_node_flag {
KERNFS_ACTIVATED = 0x0010,
KERNFS_NS = 0x0020,
KERNFS_HAS_SEQ_SHOW = 0x0040,
KERNFS_HAS_MMAP = 0x0080,
KERNFS_LOCKDEP = 0x0100,
KERNFS_SUICIDAL = 0x0400,
KERNFS_SUICIDED = 0x0800,
KERNFS_EMPTY_DIR = 0x1000,
KERNFS_HAS_RELEASE = 0x2000,
};


enum kernfs_root_flag {






KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001,
# 79 "./include/linux/kernfs.h"
KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 0x0002,





KERNFS_ROOT_SUPPORT_EXPORTOP = 0x0004,




KERNFS_ROOT_SUPPORT_USER_XATTR = 0x0008,
};


struct kernfs_elem_dir {
unsigned long subdirs;

struct rb_root children;





struct kernfs_root *root;




unsigned long rev;
};

struct kernfs_elem_symlink {
struct kernfs_node *target_kn;
};

struct kernfs_elem_attr {
const struct kernfs_ops *ops;
struct kernfs_open_node *open;
loff_t size;
struct kernfs_node *notify_next;
};
# 131 "./include/linux/kernfs.h"
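/*
 * struct kernfs_node: one node in a kernfs (sysfs) tree. @rb links it
 * into the parent's children rbtree; the union carries the type-specific
 * state for directories, symlinks and attribute files.
 */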
struct kernfs_node {
atomic_t count;
atomic_t active;

struct lockdep_map dep_map;







struct kernfs_node *parent;
const char *name;

struct rb_node rb;

const void *ns;
unsigned int hash;
union {
struct kernfs_elem_dir dir;
struct kernfs_elem_symlink symlink;
struct kernfs_elem_attr attr;
};

void *priv;





u64 id;

unsigned short flags;
umode_t mode;
struct kernfs_iattrs *iattr;
};
# 176 "./include/linux/kernfs.h"
struct kernfs_syscall_ops {
int (*show_options)(struct seq_file *sf, struct kernfs_root *root);

int (*mkdir)(struct kernfs_node *parent, const char *name,
umode_t mode);
int (*rmdir)(struct kernfs_node *kn);
int (*rename)(struct kernfs_node *kn, struct kernfs_node *new_parent,
const char *new_name);
int (*show_path)(struct seq_file *sf, struct kernfs_node *kn,
struct kernfs_root *root);
};

struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root);

struct kernfs_open_file {

struct kernfs_node *kn;
struct file *file;
struct seq_file *seq_file;
void *priv;


struct mutex mutex;
struct mutex prealloc_mutex;
int event;
struct list_head list;
char *prealloc_buf;

size_t atomic_write_len;
bool mmapped:1;
bool released:1;
const struct vm_operations_struct *vm_ops;
};

struct kernfs_ops {




int (*open)(struct kernfs_open_file *of);
void (*release)(struct kernfs_open_file *of);
# 229 "./include/linux/kernfs.h"
int (*seq_show)(struct seq_file *sf, void *v);

void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
void (*seq_stop)(struct seq_file *sf, void *v);

ssize_t (*read)(struct kernfs_open_file *of, char *buf, size_t bytes,
loff_t off);
# 245 "./include/linux/kernfs.h"
size_t atomic_write_len;






bool prealloc;
ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes,
loff_t off);

__poll_t (*poll)(struct kernfs_open_file *of,
struct poll_table_struct *pt);

int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
};




struct kernfs_fs_context {
struct kernfs_root *root;
void *ns_tag;
unsigned long magic;


bool new_sb_created;
};



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) enum kernfs_node_type kernfs_type(struct kernfs_node *kn)
{
return kn->flags & 0x000f;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ino_t kernfs_id_ino(u64 id)
{

if (sizeof(ino_t) >= sizeof(u64))
return id;
else
return (u32)id;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 kernfs_id_gen(u64 id)
{

if (sizeof(ino_t) >= sizeof(u64))
return 1;
else
return id >> 32;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ino_t kernfs_ino(struct kernfs_node *kn)
{
return kernfs_id_ino(kn->id);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ino_t kernfs_gen(struct kernfs_node *kn)
{
return kernfs_id_gen(kn->id);
}
# 317 "./include/linux/kernfs.h"
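/*
 * The two statement-expressions in kernfs_enable_ns() below are expanded
 * WARN_ON_ONCE() checks: the RISC-V "ebreak" plus __bug_table asm records
 * the warning site, and the _Generic blob is an expanded READ_ONCE().
 */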
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kernfs_enable_ns(struct kernfs_node *kn)
{
({ int __ret_warn_on = !!(kernfs_type(kn) != KERNFS_DIR); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/kernfs.h"), "i" (319), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
({ int __ret_warn_on = !!(!(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_226(void) ; if (!((sizeof((&kn->dir.children)->rb_node) == sizeof(char) || sizeof((&kn->dir.children)->rb_node) == sizeof(short) || sizeof((&kn->dir.children)->rb_node) == sizeof(int) || sizeof((&kn->dir.children)->rb_node) == sizeof(long)) || sizeof((&kn->dir.children)->rb_node) == sizeof(long long))) __compiletime_assert_226(); } while (0); (*(const volatile typeof( _Generic(((&kn->dir.children)->rb_node), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&kn->dir.children)->rb_node))) *)&((&kn->dir.children)->rb_node)); }) == ((void *)0))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/kernfs.h"), "i" (320), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
kn->flags |= KERNFS_NS;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool kernfs_ns_enabled(struct kernfs_node *kn)
{
return kn->flags & KERNFS_NS;
}

int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
int kernfs_path_from_node(struct kernfs_node *root_kn, struct kernfs_node *kn,
char *buf, size_t buflen);
void pr_cont_kernfs_name(struct kernfs_node *kn);
void pr_cont_kernfs_path(struct kernfs_node *kn);
struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn);
struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
const char *name, const void *ns);
struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
const char *path, const void *ns);
void kernfs_get(struct kernfs_node *kn);
void kernfs_put(struct kernfs_node *kn);

struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry);
struct kernfs_root *kernfs_root_from_sb(struct super_block *sb);
struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn);

struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
struct super_block *sb);
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
unsigned int flags, void *priv);
void kernfs_destroy_root(struct kernfs_root *root);

struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
const char *name, umode_t mode,
kuid_t uid, kgid_t gid,
void *priv, const void *ns);
struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
const char *name);
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
const char *name, umode_t mode,
kuid_t uid, kgid_t gid,
loff_t size,
const struct kernfs_ops *ops,
void *priv, const void *ns,
struct lock_class_key *key);
struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
const char *name,
struct kernfs_node *target);
void kernfs_activate(struct kernfs_node *kn);
void kernfs_remove(struct kernfs_node *kn);
void kernfs_break_active_protection(struct kernfs_node *kn);
void kernfs_unbreak_active_protection(struct kernfs_node *kn);
bool kernfs_remove_self(struct kernfs_node *kn);
int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
const void *ns);
int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
const char *new_name, const void *new_ns);
int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr);
__poll_t kernfs_generic_poll(struct kernfs_open_file *of,
struct poll_table_struct *pt);
void kernfs_notify(struct kernfs_node *kn);

int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
void *value, size_t size);
int kernfs_xattr_set(struct kernfs_node *kn, const char *name,
const void *value, size_t size, int flags);

const void *kernfs_super_ns(struct super_block *sb);
int kernfs_get_tree(struct fs_context *fc);
void kernfs_free_fs_context(struct fs_context *fc);
void kernfs_kill_sb(struct super_block *sb);

void kernfs_init(void);

struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
u64 id);
# 529 "./include/linux/kernfs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
{
return kernfs_path_from_node(kn, ((void *)0), buf, buflen);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct kernfs_node *
kernfs_find_and_get(struct kernfs_node *kn, const char *name)
{
return kernfs_find_and_get_ns(kn, name, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct kernfs_node *
kernfs_walk_and_get(struct kernfs_node *kn, const char *path)
{
return kernfs_walk_and_get_ns(kn, path, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct kernfs_node *
kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode,
void *priv)
{
return kernfs_create_dir_ns(parent, name, mode,
(kuid_t){ 0 }, (kgid_t){ 0 },
priv, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int kernfs_remove_by_name(struct kernfs_node *parent,
const char *name)
{
return kernfs_remove_by_name_ns(parent, name, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int kernfs_rename(struct kernfs_node *kn,
struct kernfs_node *new_parent,
const char *new_name)
{
return kernfs_rename_ns(kn, new_parent, new_name, ((void *)0));
}
# 17 "./include/linux/sysfs.h" 2




# 1 "./include/linux/kobject_ns.h" 1
# 19 "./include/linux/kobject_ns.h"
struct sock;
struct kobject;





enum kobj_ns_type {
KOBJ_NS_TYPE_NONE = 0,
KOBJ_NS_TYPE_NET,
KOBJ_NS_TYPES
};
# 39 "./include/linux/kobject_ns.h"
struct kobj_ns_type_operations {
enum kobj_ns_type type;
bool (*current_may_mount)(void);
void *(*grab_current_ns)(void);
const void *(*netlink_ns)(struct sock *sk);
const void *(*initial_ns)(void);
void (*drop_ns)(void *);
};

int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
int kobj_ns_type_registered(enum kobj_ns_type type);
const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);

bool kobj_ns_current_may_mount(enum kobj_ns_type type);
void *kobj_ns_grab_current(enum kobj_ns_type type);
const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
const void *kobj_ns_initial(enum kobj_ns_type type);
void kobj_ns_drop(enum kobj_ns_type type, void *ns);
# 22 "./include/linux/sysfs.h" 2



struct kobject;
struct module;
struct bin_attribute;
enum kobj_ns_type;

struct attribute {
const char *name;
umode_t mode;

bool ignore_lockdep:1;
struct lock_class_key *key;
struct lock_class_key skey;

};
# 84 "./include/linux/sysfs.h"
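/*
 * struct attribute_group: a named set of sysfs attributes; is_visible()
 * and is_bin_visible() let the owner hide or re-mode entries per kobject.
 */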
struct attribute_group {
const char *name;
umode_t (*is_visible)(struct kobject *,
struct attribute *, int);
umode_t (*is_bin_visible)(struct kobject *,
struct bin_attribute *, int);
struct attribute **attrs;
struct bin_attribute **bin_attrs;
};
# 171 "./include/linux/sysfs.h"
struct file;
struct vm_area_struct;
struct address_space;

struct bin_attribute {
struct attribute attr;
size_t size;
void *private;
struct address_space *(*f_mapping)(void);
ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *,
char *, loff_t, size_t);
ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *,
char *, loff_t, size_t);
int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
struct vm_area_struct *vma);
};
# 238 "./include/linux/sysfs.h"
struct sysfs_ops {
ssize_t (*show)(struct kobject *, struct attribute *, char *);
ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
};



int __attribute__((__warn_unused_result__)) sysfs_create_dir_ns(struct kobject *kobj, const void *ns);
void sysfs_remove_dir(struct kobject *kobj);
int __attribute__((__warn_unused_result__)) sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name,
const void *new_ns);
int __attribute__((__warn_unused_result__)) sysfs_move_dir_ns(struct kobject *kobj,
struct kobject *new_parent_kobj,
const void *new_ns);
int __attribute__((__warn_unused_result__)) sysfs_create_mount_point(struct kobject *parent_kobj,
const char *name);
void sysfs_remove_mount_point(struct kobject *parent_kobj,
const char *name);

int __attribute__((__warn_unused_result__)) sysfs_create_file_ns(struct kobject *kobj,
const struct attribute *attr,
const void *ns);
int __attribute__((__warn_unused_result__)) sysfs_create_files(struct kobject *kobj,
const struct attribute * const *attr);
int __attribute__((__warn_unused_result__)) sysfs_chmod_file(struct kobject *kobj,
const struct attribute *attr, umode_t mode);
struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
const struct attribute *attr);
void sysfs_unbreak_active_protection(struct kernfs_node *kn);
void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
const void *ns);
bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attr);

int __attribute__((__warn_unused_result__)) sysfs_create_bin_file(struct kobject *kobj,
const struct bin_attribute *attr);
void sysfs_remove_bin_file(struct kobject *kobj,
const struct bin_attribute *attr);

int __attribute__((__warn_unused_result__)) sysfs_create_link(struct kobject *kobj, struct kobject *target,
const char *name);
int __attribute__((__warn_unused_result__)) sysfs_create_link_nowarn(struct kobject *kobj,
struct kobject *target,
const char *name);
void sysfs_remove_link(struct kobject *kobj, const char *name);

int sysfs_rename_link_ns(struct kobject *kobj, struct kobject *target,
const char *old_name, const char *new_name,
const void *new_ns);

void sysfs_delete_link(struct kobject *dir, struct kobject *targ,
const char *name);

int __attribute__((__warn_unused_result__)) sysfs_create_group(struct kobject *kobj,
const struct attribute_group *grp);
int __attribute__((__warn_unused_result__)) sysfs_create_groups(struct kobject *kobj,
const struct attribute_group **groups);
int __attribute__((__warn_unused_result__)) sysfs_update_groups(struct kobject *kobj,
const struct attribute_group **groups);
int sysfs_update_group(struct kobject *kobj,
const struct attribute_group *grp);
void sysfs_remove_group(struct kobject *kobj,
const struct attribute_group *grp);
void sysfs_remove_groups(struct kobject *kobj,
const struct attribute_group **groups);
int sysfs_add_file_to_group(struct kobject *kobj,
const struct attribute *attr, const char *group);
void sysfs_remove_file_from_group(struct kobject *kobj,
const struct attribute *attr, const char *group);
int sysfs_merge_group(struct kobject *kobj,
const struct attribute_group *grp);
void sysfs_unmerge_group(struct kobject *kobj,
const struct attribute_group *grp);
int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name,
struct kobject *target, const char *link_name);
void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name,
const char *link_name);
int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
struct kobject *target_kobj,
const char *target_name,
const char *symlink_name);

void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr);

int __attribute__((__warn_unused_result__)) sysfs_init(void);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sysfs_enable_ns(struct kernfs_node *kn)
{
return kernfs_enable_ns(kn);
}

int sysfs_file_change_owner(struct kobject *kobj, const char *name, kuid_t kuid,
kgid_t kgid);
int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid);
int sysfs_link_change_owner(struct kobject *kobj, struct kobject *targ,
const char *name, kuid_t kuid, kgid_t kgid);
int sysfs_groups_change_owner(struct kobject *kobj,
const struct attribute_group **groups,
kuid_t kuid, kgid_t kgid);
int sysfs_group_change_owner(struct kobject *kobj,
const struct attribute_group *groups, kuid_t kuid,
kgid_t kgid);
__attribute__((__format__(printf, 2, 3)))
int sysfs_emit(char *buf, const char *fmt, ...);
__attribute__((__format__(printf, 3, 4)))
int sysfs_emit_at(char *buf, int at, const char *fmt, ...);
# 604 "./include/linux/sysfs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) sysfs_create_file(struct kobject *kobj,
const struct attribute *attr)
{
return sysfs_create_file_ns(kobj, attr, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sysfs_remove_file(struct kobject *kobj,
const struct attribute *attr)
{
sysfs_remove_file_ns(kobj, attr, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sysfs_rename_link(struct kobject *kobj, struct kobject *target,
const char *old_name, const char *new_name)
{
return sysfs_rename_link_ns(kobj, target, old_name, new_name, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sysfs_notify_dirent(struct kernfs_node *kn)
{
kernfs_notify(kn);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct kernfs_node *sysfs_get_dirent(struct kernfs_node *parent,
const char *name)
{
return kernfs_find_and_get(parent, name);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct kernfs_node *sysfs_get(struct kernfs_node *kn)
{
kernfs_get(kn);
return kn;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sysfs_put(struct kernfs_node *kn)
{
kernfs_put(kn);
}
# 21 "./include/linux/kobject.h" 2
# 41 "./include/linux/kobject.h"
extern u64 uevent_seqnum;
# 53 "./include/linux/kobject.h"
enum kobject_action {
KOBJ_ADD,
KOBJ_REMOVE,
KOBJ_CHANGE,
KOBJ_MOVE,
KOBJ_ONLINE,
KOBJ_OFFLINE,
KOBJ_BIND,
KOBJ_UNBIND,
};
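/*
 * struct kobject: refcounted, named object backing one sysfs directory.
 * The irq_desc defined further down embeds one of these.
 */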

struct kobject {
const char *name;
struct list_head entry;
struct kobject *parent;
struct kset *kset;
const struct kobj_type *ktype;
struct kernfs_node *sd;
struct kref kref;



unsigned int state_initialized:1;
unsigned int state_in_sysfs:1;
unsigned int state_add_uevent_sent:1;
unsigned int state_remove_uevent_sent:1;
unsigned int uevent_suppress:1;
};

extern __attribute__((__format__(printf, 2, 3)))
int kobject_set_name(struct kobject *kobj, const char *name, ...);
extern __attribute__((__format__(printf, 2, 0)))
int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
va_list vargs);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *kobject_name(const struct kobject *kobj)
{
return kobj->name;
}

extern void kobject_init(struct kobject *kobj, const struct kobj_type *ktype);
extern __attribute__((__format__(printf, 3, 4))) __attribute__((__warn_unused_result__))
int kobject_add(struct kobject *kobj, struct kobject *parent,
const char *fmt, ...);
extern __attribute__((__format__(printf, 4, 5))) __attribute__((__warn_unused_result__))
int kobject_init_and_add(struct kobject *kobj,
const struct kobj_type *ktype, struct kobject *parent,
const char *fmt, ...);

extern void kobject_del(struct kobject *kobj);

extern struct kobject * __attribute__((__warn_unused_result__)) kobject_create_and_add(const char *name,
struct kobject *parent);

extern int __attribute__((__warn_unused_result__)) kobject_rename(struct kobject *, const char *new_name);
extern int __attribute__((__warn_unused_result__)) kobject_move(struct kobject *, struct kobject *);

extern struct kobject *kobject_get(struct kobject *kobj);
extern struct kobject * __attribute__((__warn_unused_result__)) kobject_get_unless_zero(
struct kobject *kobj);
extern void kobject_put(struct kobject *kobj);

extern const void *kobject_namespace(struct kobject *kobj);
extern void kobject_get_ownership(struct kobject *kobj,
kuid_t *uid, kgid_t *gid);
extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);

struct kobj_type {
void (*release)(struct kobject *kobj);
const struct sysfs_ops *sysfs_ops;
const struct attribute_group **default_groups;
const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
const void *(*namespace)(struct kobject *kobj);
void (*get_ownership)(struct kobject *kobj, kuid_t *uid, kgid_t *gid);
};

struct kobj_uevent_env {
char *argv[3];
char *envp[64];
int envp_idx;
char buf[2048];
int buflen;
};

struct kset_uevent_ops {
int (* const filter)(struct kobject *kobj);
const char *(* const name)(struct kobject *kobj);
int (* const uevent)(struct kobject *kobj, struct kobj_uevent_env *env);
};

struct kobj_attribute {
struct attribute attr;
ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
char *buf);
ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count);
};

extern const struct sysfs_ops kobj_sysfs_ops;

struct sock;
# 172 "./include/linux/kobject.h"
struct kset {
struct list_head list;
spinlock_t list_lock;
struct kobject kobj;
const struct kset_uevent_ops *uevent_ops;
} ;

extern void kset_init(struct kset *kset);
extern int __attribute__((__warn_unused_result__)) kset_register(struct kset *kset);
extern void kset_unregister(struct kset *kset);
extern struct kset * __attribute__((__warn_unused_result__)) kset_create_and_add(const char *name,
const struct kset_uevent_ops *u,
struct kobject *parent_kobj);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct kset *to_kset(struct kobject *kobj)
{
return kobj ? ({ void *__mptr = (void *)(kobj); _Static_assert(__builtin_types_compatible_p(typeof(*(kobj)), typeof(((struct kset *)0)->kobj)) || __builtin_types_compatible_p(typeof(*(kobj)), typeof(void)), "pointer type mismatch in container_of()"); ((struct kset *)(__mptr - __builtin_offsetof(struct kset, kobj))); }) : ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct kset *kset_get(struct kset *k)
{
return k ? to_kset(kobject_get(&k->kobj)) : ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kset_put(struct kset *k)
{
kobject_put(&k->kobj);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const struct kobj_type *get_ktype(struct kobject *kobj)
{
return kobj->ktype;
}

extern struct kobject *kset_find_obj(struct kset *, const char *);


extern struct kobject *kernel_kobj;

extern struct kobject *mm_kobj;

extern struct kobject *hypervisor_kobj;

extern struct kobject *power_kobj;

extern struct kobject *firmware_kobj;

int kobject_uevent(struct kobject *kobj, enum kobject_action action);
int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
char *envp[]);
int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count);

__attribute__((__format__(printf, 2, 3)))
int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...);
# 7 "./include/linux/irqdesc.h" 2






struct irq_affinity_notify;
struct proc_dir_entry;
struct module;
struct irq_desc;
struct irq_domain;
struct pt_regs;
# 55 "./include/linux/irqdesc.h"
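/*
 * struct irq_desc: the core per-interrupt descriptor. It wraps irq_data,
 * the flow handler and the irqaction chain, keeps per-CPU kstat counters
 * and spurious-interrupt bookkeeping, and is cacheline-aligned via the
 * attribute at the closing brace.
 */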
struct irq_desc {
struct irq_common_data irq_common_data;
struct irq_data irq_data;
unsigned int *kstat_irqs;
irq_flow_handler_t handle_irq;
struct irqaction *action;
unsigned int status_use_accessors;
unsigned int core_internal_state__do_not_mess_with_it;
unsigned int depth;
unsigned int wake_depth;
unsigned int tot_count;
unsigned int irq_count;
unsigned long last_unhandled;
unsigned int irqs_unhandled;
atomic_t threads_handled;
int threads_handled_last;
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
const struct cpumask *percpu_affinity;

const struct cpumask *affinity_hint;
struct irq_affinity_notify *affinity_notify;




unsigned long threads_oneshot;
atomic_t threads_active;
wait_queue_head_t wait_for_threads;







struct proc_dir_entry *dir;






struct callback_head rcu;
struct kobject kobj;

struct mutex request_mutex;
int parent_irq;
struct module *owner;
const char *name;
} __attribute__((__aligned__(1 << (6))));


extern void irq_lock_sparse(void);
extern void irq_unlock_sparse(void);






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int irq_desc_kstat_cpu(struct irq_desc *desc,
unsigned int cpu)
{
return desc->kstat_irqs ? *({ do { const void *__vpp_verify = (typeof((desc->kstat_irqs) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((desc->kstat_irqs))) *)((desc->kstat_irqs))); (typeof((typeof(*((desc->kstat_irqs))) *)((desc->kstat_irqs)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }) : 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct irq_desc *irq_data_to_desc(struct irq_data *data)
{
return ({ void *__mptr = (void *)(data->common); _Static_assert(__builtin_types_compatible_p(typeof(*(data->common)), typeof(((struct irq_desc *)0)->irq_common_data)) || __builtin_types_compatible_p(typeof(*(data->common)), typeof(void)), "pointer type mismatch in container_of()"); ((struct irq_desc *)(__mptr - __builtin_offsetof(struct irq_desc, irq_common_data))); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int irq_desc_get_irq(struct irq_desc *desc)
{
return desc->irq_data.irq;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
{
return &desc->irq_data;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct irq_chip *irq_desc_get_chip(struct irq_desc *desc)
{
return desc->irq_data.chip;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *irq_desc_get_chip_data(struct irq_desc *desc)
{
return desc->irq_data.chip_data;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *irq_desc_get_handler_data(struct irq_desc *desc)
{
return desc->irq_common_data.handler_data;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void generic_handle_irq_desc(struct irq_desc *desc)
{
desc->handle_irq(desc);
}

int handle_irq_desc(struct irq_desc *desc);
int generic_handle_irq(unsigned int irq);
int generic_handle_irq_safe(unsigned int irq);







int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq);
int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq);



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int irq_desc_has_action(struct irq_desc *desc)
{
return desc && desc->action != ((void *)0);
}
# 191 "./include/linux/irqdesc.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_set_handler_locked(struct irq_data *data,
irq_flow_handler_t handler)
{
struct irq_desc *desc = irq_data_to_desc(data);

desc->handle_irq = handler;
}
# 211 "./include/linux/irqdesc.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
irq_flow_handler_t handler, const char *name)
{
struct irq_desc *desc = irq_data_to_desc(data);

desc->handle_irq = handler;
desc->name = name;
data->chip = chip;
}

bool irq_check_status_bit(unsigned int irq, unsigned int bitmask);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irq_balancing_disabled(unsigned int irq)
{
return irq_check_status_bit(irq, (IRQ_PER_CPU | IRQ_NO_BALANCING));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irq_is_percpu(unsigned int irq)
{
return irq_check_status_bit(irq, IRQ_PER_CPU);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irq_is_percpu_devid(unsigned int irq)
{
return irq_check_status_bit(irq, IRQ_PER_CPU_DEVID);
}

void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
struct lock_class_key *request_class);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
struct lock_class_key *request_class)
{
if (1)
__irq_set_lockdep_class(irq, lock_class, request_class);
}
# 588 "./include/linux/irq.h" 2




# 1 "./arch/riscv/include/generated/asm/hw_irq.h" 1
# 1 "./include/asm-generic/hw_irq.h" 1
# 2 "./arch/riscv/include/generated/asm/hw_irq.h" 2
# 593 "./include/linux/irq.h" 2
# 604 "./include/linux/irq.h"
struct irqaction;
extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);





extern int irq_set_affinity_locked(struct irq_data *data,
const struct cpumask *cpumask, bool force);
extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);


extern void irq_migrate_all_off_this_cpu(void);
extern int irq_affinity_online_cpu(unsigned int cpu);
# 633 "./include/linux/irq.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_move_irq(struct irq_data *data) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_move_masked_irq(struct irq_data *data) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_force_complete_move(struct irq_desc *desc) { }


extern int no_irq_affinity;




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int irq_set_parent(int irq, int parent_irq)
{
return 0;
}






extern void handle_level_irq(struct irq_desc *desc);
extern void handle_fasteoi_irq(struct irq_desc *desc);
extern void handle_edge_irq(struct irq_desc *desc);
extern void handle_edge_eoi_irq(struct irq_desc *desc);
extern void handle_simple_irq(struct irq_desc *desc);
extern void handle_untracked_irq(struct irq_desc *desc);
extern void handle_percpu_irq(struct irq_desc *desc);
extern void handle_percpu_devid_irq(struct irq_desc *desc);
extern void handle_bad_irq(struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);

extern void handle_fasteoi_nmi(struct irq_desc *desc);
extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc);

extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
extern int irq_chip_pm_get(struct irq_data *data);
extern int irq_chip_pm_put(struct irq_data *data);

extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
extern int irq_chip_set_parent_state(struct irq_data *data,
enum irqchip_irq_state which,
bool val);
extern int irq_chip_get_parent_state(struct irq_data *data,
enum irqchip_irq_state which,
bool *state);
extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);
extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
extern void irq_chip_mask_parent(struct irq_data *data);
extern void irq_chip_mask_ack_parent(struct irq_data *data);
extern void irq_chip_unmask_parent(struct irq_data *data);
extern void irq_chip_eoi_parent(struct irq_data *data);
extern int irq_chip_set_affinity_parent(struct irq_data *data,
const struct cpumask *dest,
bool force);
extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
void *vcpu_info);
extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
extern int irq_chip_request_resources_parent(struct irq_data *data);
extern void irq_chip_release_resources_parent(struct irq_data *data);



extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);



extern int noirqdebug_setup(char *str);


extern int can_request_irq(unsigned int irq, unsigned long irqflags);


extern struct irq_chip no_irq_chip;
extern struct irq_chip dummy_irq_chip;

extern void
irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
irq_flow_handler_t handle, const char *name);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_set_chip_and_handler(unsigned int irq,
const struct irq_chip *chip,
irq_flow_handler_t handle)
{
irq_set_chip_and_handler_name(irq, chip, handle, ((void *)0));
}

extern int irq_set_percpu_devid(unsigned int irq);
extern int irq_set_percpu_devid_partition(unsigned int irq,
const struct cpumask *affinity);
extern int irq_get_percpu_devid_partition(unsigned int irq,
struct cpumask *affinity);

extern void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
const char *name);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
{
__irq_set_handler(irq, handle, 0, ((void *)0));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
{
__irq_set_handler(irq, handle, 1, ((void *)0));
}






void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
void *data);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_set_status_flags(unsigned int irq, unsigned long set)
{
irq_modify_status(irq, 0, set);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_clear_status_flags(unsigned int irq, unsigned long clr)
{
irq_modify_status(irq, clr, 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_set_noprobe(unsigned int irq)
{
irq_modify_status(irq, 0, IRQ_NOPROBE);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_set_probe(unsigned int irq)
{
irq_modify_status(irq, IRQ_NOPROBE, 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_set_nothread(unsigned int irq)
{
irq_modify_status(irq, 0, IRQ_NOTHREAD);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_set_thread(unsigned int irq)
{
irq_modify_status(irq, IRQ_NOTHREAD, 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_set_nested_thread(unsigned int irq, bool nest)
{
if (nest)
irq_set_status_flags(irq, IRQ_NESTED_THREAD);
else
irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_set_percpu_devid_flags(unsigned int irq)
{
irq_set_status_flags(irq,
IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
}


extern int irq_set_chip(unsigned int irq, const struct irq_chip *chip);
extern int irq_set_handler_data(unsigned int irq, void *data);
extern int irq_set_chip_data(unsigned int irq, void *data);
extern int irq_set_irq_type(unsigned int irq, unsigned int type);
extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
struct msi_desc *entry);
extern struct irq_data *irq_get_irq_data(unsigned int irq);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct irq_chip *irq_get_chip(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
return d ? d->chip : ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
{
return d->chip;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *irq_get_chip_data(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
return d ? d->chip_data : ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *irq_data_get_irq_chip_data(struct irq_data *d)
{
return d->chip_data;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *irq_get_handler_data(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
return d ? d->common->handler_data : ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *irq_data_get_irq_handler_data(struct irq_data *d)
{
return d->common->handler_data;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct msi_desc *irq_get_msi_desc(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
return d ? d->common->msi_desc : ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
{
return d->common->msi_desc;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 irq_get_trigger_type(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
return d ? irqd_get_trigger_type(d) : 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int irq_common_data_get_node(struct irq_common_data *d)
{



return 0;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int irq_data_get_node(struct irq_data *d)
{
return irq_common_data_get_node(d->common);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct cpumask *irq_get_affinity_mask(int irq)
{
struct irq_data *d = irq_get_irq_data(irq);

return d ? d->common->affinity : ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
return d->common->affinity;
}
# 904 "./include/linux/irq.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_data_update_effective_affinity(struct irq_data *d,
const struct cpumask *m)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
return d->common->affinity;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct cpumask *irq_get_effective_affinity_mask(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);

return d ? irq_data_get_effective_affinity_mask(d) : ((void *)0);
}
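/*
 * The empty irq_data_update_effective_affinity() above, together with
 * irq_data_get_effective_affinity_mask() falling back to the plain
 * d->common->affinity, suggests this kernel was configured without
 * CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK.
 */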

unsigned int arch_dynirq_lower_bound(unsigned int from);

int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
struct module *owner,
const struct irq_affinity_desc *affinity);

int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
unsigned int cnt, int node, struct module *owner,
const struct irq_affinity_desc *affinity);
# 963 "./include/linux/irq.h"
void irq_free_descs(unsigned int irq, unsigned int cnt);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_free_desc(unsigned int irq)
{
irq_free_descs(irq, 1);
}
# 983 "./include/linux/irq.h"
struct irq_chip_regs {
unsigned long enable;
unsigned long disable;
unsigned long mask;
unsigned long ack;
unsigned long eoi;
unsigned long type;
unsigned long polarity;
};
# 1006 "./include/linux/irq.h"
struct irq_chip_type {
struct irq_chip chip;
struct irq_chip_regs regs;
irq_flow_handler_t handler;
u32 type;
u32 mask_cache_priv;
u32 *mask_cache;
};
# 1048 "./include/linux/irq.h"
struct irq_chip_generic {
raw_spinlock_t lock;
void *reg_base;
u32 (*reg_readl)(void *addr);
void (*reg_writel)(u32 val, void *addr);
void (*suspend)(struct irq_chip_generic *gc);
void (*resume)(struct irq_chip_generic *gc);
unsigned int irq_base;
unsigned int irq_cnt;
u32 mask_cache;
u32 type_cache;
u32 polarity_cache;
u32 wake_enabled;
u32 wake_active;
unsigned int num_ct;
void *private;
unsigned long installed;
unsigned long unused;
struct irq_domain *domain;
struct list_head list;
struct irq_chip_type chip_types[];
};
# 1081 "./include/linux/irq.h"
enum irq_gc_flags {
IRQ_GC_INIT_MASK_CACHE = 1 << 0,
IRQ_GC_INIT_NESTED_LOCK = 1 << 1,
IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2,
IRQ_GC_NO_MASK = 1 << 3,
IRQ_GC_BE_IO = 1 << 4,
};
# 1098 "./include/linux/irq.h"
struct irq_domain_chip_generic {
unsigned int irqs_per_chip;
unsigned int num_chips;
unsigned int irq_flags_to_clear;
unsigned int irq_flags_to_set;
enum irq_gc_flags gc_flags;
struct irq_chip_generic *gc[];
};
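/* Generic irq-chip helpers; implemented in kernel/irq/generic-chip.c. */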


void irq_gc_noop(struct irq_data *d);
void irq_gc_mask_disable_reg(struct irq_data *d);
void irq_gc_mask_set_bit(struct irq_data *d);
void irq_gc_mask_clr_bit(struct irq_data *d);
void irq_gc_unmask_enable_reg(struct irq_data *d);
void irq_gc_ack_set_bit(struct irq_data *d);
void irq_gc_ack_clr_bit(struct irq_data *d);
void irq_gc_mask_disable_and_ack_set(struct irq_data *d);
void irq_gc_eoi(struct irq_data *d);
int irq_gc_set_wake(struct irq_data *d, unsigned int on);


int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw_irq);
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
void *reg_base, irq_flow_handler_t handler);
void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
enum irq_gc_flags flags, unsigned int clr,
unsigned int set);
int irq_setup_alt_chip(struct irq_data *d, unsigned int type);
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
unsigned int clr, unsigned int set);

struct irq_chip_generic *
devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct,
unsigned int irq_base, void *reg_base,
irq_flow_handler_t handler);
int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc,
u32 msk, enum irq_gc_flags flags,
unsigned int clr, unsigned int set);

struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);

int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
int num_ct, const char *name,
irq_flow_handler_t handler,
unsigned int clr, unsigned int set,
enum irq_gc_flags flags);
# 1156 "./include/linux/irq.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_free_generic_chip(struct irq_chip_generic *gc)
{
kfree(gc);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_destroy_generic_chip(struct irq_chip_generic *gc,
u32 msk, unsigned int clr,
unsigned int set)
{
irq_remove_generic_chip(gc, msk, clr, set);
irq_free_generic_chip(gc);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
{
return ({ void *__mptr = (void *)(d->chip); _Static_assert(__builtin_types_compatible_p(typeof(*(d->chip)), typeof(((struct irq_chip_type *)0)->chip)) || __builtin_types_compatible_p(typeof(*(d->chip)), typeof(void)), "pointer type mismatch in container_of()"); ((struct irq_chip_type *)(__mptr - __builtin_offsetof(struct irq_chip_type, chip))); });
}
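/*
 * The one-liner above is container_of() fully expanded; before
 * preprocessing it reads, roughly:
 *
 *     return container_of(d->chip, struct irq_chip_type, chip);
 */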




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_gc_lock(struct irq_chip_generic *gc)
{
_raw_spin_lock(&gc->lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_gc_unlock(struct irq_chip_generic *gc)
{
_raw_spin_unlock(&gc->lock);
}
# 1201 "./include/linux/irq.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irq_reg_writel(struct irq_chip_generic *gc,
u32 val, int reg_offset)
{
if (gc->reg_writel)
gc->reg_writel(val, gc->reg_base + reg_offset);
else
({ __asm__ __volatile__ ("fence w,o" : : : "memory"); ((void)__raw_writel(( u32)(( __le32)(__u32)((val))), ((gc->reg_base + reg_offset)))); mmiowb_set_pending(); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 irq_reg_readl(struct irq_chip_generic *gc,
int reg_offset)
{
if (gc->reg_readl)
return gc->reg_readl(gc->reg_base + reg_offset);
else
return ({ u32 __v; do {} while (0); __v = ({ u32 __r = (( __u32)(__le32)(( __le32)__raw_readl(gc->reg_base + reg_offset))); __r; }); __asm__ __volatile__ ("fence i,r" : : : "memory"); __v; });
}
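/*
 * The inline asm in the two register accessors above is the RISC-V
 * writel()/readl() expansion: a "fence w,o" write barrier (plus
 * mmiowb_set_pending() bookkeeping) before the MMIO store, and a
 * "fence i,r" read barrier after the MMIO load.
 */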

struct irq_matrix;
struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
unsigned int alloc_start,
unsigned int alloc_end);
void irq_matrix_online(struct irq_matrix *m);
void irq_matrix_offline(struct irq_matrix *m);
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
unsigned int *mapped_cpu);
void irq_matrix_reserve(struct irq_matrix *m);
void irq_matrix_remove_reserved(struct irq_matrix *m);
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
bool reserved, unsigned int *mapped_cpu);
void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
unsigned int bit, bool managed);
void irq_matrix_assign(struct irq_matrix *m, unsigned int bit);
unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown);
unsigned int irq_matrix_allocated(struct irq_matrix *m);
unsigned int irq_matrix_reserved(struct irq_matrix *m);
void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind);



irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
int __ipi_send_single(struct irq_desc *desc, unsigned int cpu);
int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
int ipi_send_single(unsigned int virq, unsigned int cpu);
int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
# 1259 "./include/linux/irq.h"
int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) set_handle_irq(void (*handle_irq)(struct pt_regs *));





extern void (*handle_arch_irq)(struct pt_regs *) __attribute__((__section__(".data..ro_after_init")));
void generic_handle_arch_irq(struct pt_regs *regs);
# 18 "./include/asm-generic/hardirq.h" 2


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ack_bad_irq(unsigned int irq)
{
({ do {} while (0); _printk("\001" "2" "unexpected IRQ trap at vector %02x\n", irq); });
}
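/* The "\001" "2" string prefix above is the expanded KERN_CRIT level. */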
# 2 "./arch/riscv/include/generated/asm/hardirq.h" 2
# 12 "./include/linux/hardirq.h" 2

extern void synchronize_irq(unsigned int irq);
extern bool synchronize_hardirq(unsigned int irq);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __rcu_irq_enter_check_tick(void) { }


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void rcu_irq_enter_check_tick(void)
{
if (context_tracking_enabled())
__rcu_irq_enter_check_tick();
}
# 55 "./include/linux/hardirq.h"
void irq_enter(void);



void irq_enter_rcu(void);
# 83 "./include/linux/hardirq.h"
void irq_exit(void);




void irq_exit_rcu(void);
# 99 "./include/linux/hardirq.h"
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
# 12 "./include/linux/highmem.h" 2

# 1 "././include/linux/highmem-internal.h" 1
# 20 "././include/linux/highmem-internal.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kmap_local_fork(struct task_struct *tsk) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kmap_assert_nomap(void) { }
# 154 "././include/linux/highmem-internal.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *kmap_to_page(void *addr)
{
return ((((struct page *)((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) ))))))) + (((((({ unsigned long _x = (unsigned long)(addr); ((_x) >= kernel_map.page_offset && (!1 || (_x) < kernel_map.page_offset + (((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2))) ? ((unsigned long)(_x) - kernel_map.va_pa_offset) : ({ unsigned long _y = _x; (0 && _y < kernel_map.virt_addr + 0) ? ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - 0); }); })) >> (12)))))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kmap(struct page *page)
{
do { __might_sleep("./include/linux/highmem-internal.h", 161); __cond_resched(); } while (0);
return lowmem_page_address(page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kunmap_high(struct page *page) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kmap_flush_unused(void) { }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kunmap(struct page *page)
{



}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kmap_local_page(struct page *page)
{
return lowmem_page_address(page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kmap_local_folio(struct folio *folio, size_t offset)
{
return lowmem_page_address(&folio->page) + offset;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
return kmap_local_page(page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kmap_local_pfn(unsigned long pfn)
{
return kmap_local_page((((struct page *)((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) ))))))) + (pfn)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __kunmap_local(void *addr)
{



}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kmap_atomic(struct page *page)
{
if (0)
migrate_disable();
else
do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);
pagefault_disable();
return lowmem_page_address(page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
return kmap_atomic(page);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kmap_atomic_pfn(unsigned long pfn)
{
return kmap_atomic((((struct page *)((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) ))))))) + (pfn)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __kunmap_atomic(void *addr)
{



pagefault_enable();
if (0)
migrate_enable();
else
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int nr_free_highpages(void) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long totalhigh_pages(void) { return 0UL; }
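/*
 * The block above is the !CONFIG_HIGHMEM flavour of
 * highmem-internal.h: kmap(), kmap_atomic() and kmap_local_page() all
 * collapse to lowmem_page_address(), and the enormous expressions in
 * kmap_to_page(), kmap_local_pfn() and kmap_atomic_pfn() appear to be
 * the RISC-V virt_to_page()/pfn_to_page() macros (vmemmap-based)
 * expanded inline.
 */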
# 14 "./include/linux/highmem.h" 2
# 36 "./include/linux/highmem.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kmap(struct page *page);
# 45 "./include/linux/highmem.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kunmap(struct page *page);







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *kmap_to_page(void *addr);





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kmap_flush_unused(void);
# 96 "./include/linux/highmem.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kmap_local_page(struct page *page);
# 133 "./include/linux/highmem.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kmap_local_folio(struct folio *folio, size_t offset);
# 146 "./include/linux/highmem.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kmap_atomic(struct page *page);
# 160 "./include/linux/highmem.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int nr_free_highpages(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long totalhigh_pages(void);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *addr = kmap_local_page(page);
memset((addr), 0, ((1UL) << (12)));
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_227(void) ; if (!(!(__builtin_types_compatible_p(typeof((addr)), typeof(struct page *))))) __compiletime_assert_227(); } while (0); __kunmap_local(addr); } while (0);
}
# 201 "./include/linux/highmem.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
unsigned long vaddr)
{
struct page *page = alloc_pages(((((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u) | (( gfp_t)0x100000u)) | (( gfp_t)0x02u)) | (( gfp_t)0x08u) | (( gfp_t)0)), 0);

if (page)
clear_user_highpage(page, vaddr);

return page;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_highpage(struct page *page)
{
void *kaddr = kmap_local_page(page);
memset((kaddr), 0, ((1UL) << (12)));
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_228(void) ; if (!(!(__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *))))) __compiletime_assert_228(); } while (0); __kunmap_local(kaddr); } while (0);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tag_clear_highpage(struct page *page)
{
}
# 237 "./include/linux/highmem.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void zero_user_segments(struct page *page,
unsigned start1, unsigned end1,
unsigned start2, unsigned end2)
{
void *kaddr = kmap_local_page(page);
unsigned int i;

do { if (__builtin_expect(!!(end1 > page_size(page) || end2 > page_size(page)), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/highmem.h"), "i" (244), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);

if (end1 > start1)
memset(kaddr + start1, 0, end1 - start1);

if (end2 > start2)
memset(kaddr + start2, 0, end2 - start2);

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_229(void) ; if (!(!(__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *))))) __compiletime_assert_229(); } while (0); __kunmap_local(kaddr); } while (0);
for (i = 0; i < compound_nr(page); i++)
flush_dcache_page(page + i);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void zero_user_segment(struct page *page,
unsigned start, unsigned end)
{
zero_user_segments(page, start, end, 0, 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void zero_user(struct page *page,
unsigned start, unsigned size)
{
zero_user_segments(page, start, start + size, 0, 0);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
char *vfrom, *vto;

vfrom = kmap_local_page(from);
vto = kmap_local_page(to);
memcpy((vto), (vfrom), ((1UL) << (12)));
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_230(void) ; if (!(!(__builtin_types_compatible_p(typeof((vto)), typeof(struct page *))))) __compiletime_assert_230(); } while (0); __kunmap_local(vto); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_231(void) ; if (!(!(__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *))))) __compiletime_assert_231(); } while (0); __kunmap_local(vfrom); } while (0);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void copy_highpage(struct page *to, struct page *from)
{
char *vfrom, *vto;

vfrom = kmap_local_page(from);
vto = kmap_local_page(to);
memcpy((vto), (vfrom), ((1UL) << (12)));
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_232(void) ; if (!(!(__builtin_types_compatible_p(typeof((vto)), typeof(struct page *))))) __compiletime_assert_232(); } while (0); __kunmap_local(vto); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_233(void) ; if (!(!(__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *))))) __compiletime_assert_233(); } while (0); __kunmap_local(vfrom); } while (0);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memcpy_page(struct page *dst_page, size_t dst_off,
struct page *src_page, size_t src_off,
size_t len)
{
char *dst = kmap_local_page(dst_page);
char *src = kmap_local_page(src_page);

do { if (__builtin_expect(!!(dst_off + len > ((1UL) << (12)) || src_off + len > ((1UL) << (12))), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/highmem.h"), "i" (308), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
memcpy(dst + dst_off, src + src_off, len);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_234(void) ; if (!(!(__builtin_types_compatible_p(typeof((src)), typeof(struct page *))))) __compiletime_assert_234(); } while (0); __kunmap_local(src); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_235(void) ; if (!(!(__builtin_types_compatible_p(typeof((dst)), typeof(struct page *))))) __compiletime_assert_235(); } while (0); __kunmap_local(dst); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memmove_page(struct page *dst_page, size_t dst_off,
struct page *src_page, size_t src_off,
size_t len)
{
char *dst = kmap_local_page(dst_page);
char *src = kmap_local_page(src_page);

do { if (__builtin_expect(!!(dst_off + len > ((1UL) << (12)) || src_off + len > ((1UL) << (12))), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/highmem.h"), "i" (321), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
memmove(dst + dst_off, src + src_off, len);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_236(void) ; if (!(!(__builtin_types_compatible_p(typeof((src)), typeof(struct page *))))) __compiletime_assert_236(); } while (0); __kunmap_local(src); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_237(void) ; if (!(!(__builtin_types_compatible_p(typeof((dst)), typeof(struct page *))))) __compiletime_assert_237(); } while (0); __kunmap_local(dst); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memset_page(struct page *page, size_t offset, int val,
size_t len)
{
char *addr = kmap_local_page(page);

do { if (__builtin_expect(!!(offset + len > ((1UL) << (12))), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/highmem.h"), "i" (332), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
memset(addr + offset, val, len);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_238(void) ; if (!(!(__builtin_types_compatible_p(typeof((addr)), typeof(struct page *))))) __compiletime_assert_238(); } while (0); __kunmap_local(addr); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memcpy_from_page(char *to, struct page *page,
size_t offset, size_t len)
{
char *from = kmap_local_page(page);

do { if (__builtin_expect(!!(offset + len > ((1UL) << (12))), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/highmem.h"), "i" (342), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
memcpy(to, from + offset, len);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_239(void) ; if (!(!(__builtin_types_compatible_p(typeof((from)), typeof(struct page *))))) __compiletime_assert_239(); } while (0); __kunmap_local(from); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memcpy_to_page(struct page *page, size_t offset,
const char *from, size_t len)
{
char *to = kmap_local_page(page);

do { if (__builtin_expect(!!(offset + len > ((1UL) << (12))), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/highmem.h"), "i" (352), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
memcpy(to + offset, from, len);
flush_dcache_page(page);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_240(void) ; if (!(!(__builtin_types_compatible_p(typeof((to)), typeof(struct page *))))) __compiletime_assert_240(); } while (0); __kunmap_local(to); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memzero_page(struct page *page, size_t offset, size_t len)
{
char *addr = kmap_local_page(page);
memset(addr + offset, 0, len);
flush_dcache_page(page);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_241(void) ; if (!(!(__builtin_types_compatible_p(typeof((addr)), typeof(struct page *))))) __compiletime_assert_241(); } while (0); __kunmap_local(addr); } while (0);
}
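/*
 * In the page copy/clear helpers above, each trailing
 * do { ... __compiletime_assert_NNN() ... } while (0) is an expanded
 * kunmap_local(): the type check fails the build if a struct page *
 * is passed instead of the mapped address, then __kunmap_local()
 * drops the mapping. The "ebreak" asm blocks are BUG_ON-style bounds
 * checks against PAGE_SIZE.
 */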
# 374 "./include/linux/highmem.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_zero_segments(struct folio *folio,
size_t start1, size_t xend1, size_t start2, size_t xend2)
{
zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_zero_segment(struct folio *folio,
size_t start, size_t xend)
{
zero_user_segments(&folio->page, start, xend, 0, 0);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_zero_range(struct folio *folio,
size_t start, size_t length)
{
zero_user_segments(&folio->page, start, start + length, 0, 0);
}
# 11 "./include/linux/bvec.h" 2







struct page;
# 32 "./include/linux/bvec.h"
struct bio_vec {
struct page *bv_page;
unsigned int bv_len;
unsigned int bv_offset;
};

struct bvec_iter {
sector_t bi_sector;

unsigned int bi_size;

unsigned int bi_idx;

unsigned int bi_bvec_done;

} __attribute__((__packed__));

struct bvec_iter_all {
struct bio_vec bv;
int idx;
unsigned done;
};
# 101 "./include/linux/bvec.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bvec_iter_advance(const struct bio_vec *bv,
struct bvec_iter *iter, unsigned bytes)
{
unsigned int idx = iter->bi_idx;

if (({ static bool __attribute__((__section__(".data.once"))) __already_done; bool __ret_do_once = !!(bytes > iter->bi_size); if (__builtin_expect(!!(__ret_do_once && !__already_done), 0)) { __already_done = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("Attempted to advance past end of bvec iter\n"); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/bvec.h"), "i" (107), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_do_once), 0); })) {

iter->bi_size = 0;
return false;
}

iter->bi_size -= bytes;
bytes += iter->bi_bvec_done;

while (bytes && bytes >= bv[idx].bv_len) {
bytes -= bv[idx].bv_len;
idx++;
}

iter->bi_idx = idx;
iter->bi_bvec_done = bytes;
return true;
}
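/*
 * The wall of text in the if () above is a once-only WARN ("Attempted
 * to advance past end of bvec iter") expanded inline; past that,
 * bvec_iter_advance() simply walks whole bio_vecs and leaves the
 * remainder in bi_bvec_done.
 */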





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bvec_iter_advance_single(const struct bio_vec *bv,
struct bvec_iter *iter, unsigned int bytes)
{
unsigned int done = iter->bi_bvec_done + bytes;

if (done == bv[iter->bi_idx].bv_len) {
done = 0;
iter->bi_idx++;
}
iter->bi_bvec_done = done;
iter->bi_size -= bytes;
}
# 157 "./include/linux/bvec.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all)
{
iter_all->done = 0;
iter_all->idx = 0;

return &iter_all->bv;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bvec_advance(const struct bio_vec *bvec,
struct bvec_iter_all *iter_all)
{
struct bio_vec *bv = &iter_all->bv;

if (iter_all->done) {
bv->bv_page++;
bv->bv_offset = 0;
} else {
bv->bv_page = bvec->bv_page + (bvec->bv_offset >> (12));
bv->bv_offset = bvec->bv_offset & ~(~(((1UL) << (12)) - 1));
}
bv->bv_len = __builtin_choose_expr(((!!(sizeof((typeof((unsigned int)(((1UL) << (12)) - bv->bv_offset)) *)1 == (typeof((unsigned int)(bvec->bv_len - iter_all->done)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((unsigned int)(((1UL) << (12)) - bv->bv_offset)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((unsigned int)(bvec->bv_len - iter_all->done)) * 0l)) : (int *)8))))), (((unsigned int)(((1UL) << (12)) - bv->bv_offset)) < ((unsigned int)(bvec->bv_len - iter_all->done)) ? ((unsigned int)(((1UL) << (12)) - bv->bv_offset)) : ((unsigned int)(bvec->bv_len - iter_all->done))), ({ typeof((unsigned int)(((1UL) << (12)) - bv->bv_offset)) __UNIQUE_ID___x242 = ((unsigned int)(((1UL) << (12)) - bv->bv_offset)); typeof((unsigned int)(bvec->bv_len - iter_all->done)) __UNIQUE_ID___y243 = ((unsigned int)(bvec->bv_len - iter_all->done)); ((__UNIQUE_ID___x242) < (__UNIQUE_ID___y243) ? (__UNIQUE_ID___x242) : (__UNIQUE_ID___y243)); }));

iter_all->done += bv->bv_len;

if (iter_all->done == bvec->bv_len) {
iter_all->idx++;
iter_all->done = 0;
}
}
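/*
 * The bv->bv_len assignment above is the kernel's type-checked min()
 * macro fully expanded; pre-expansion it is roughly:
 *
 *     bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset,
 *                        bvec->bv_len - iter_all->done);
 */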
# 194 "./include/linux/bvec.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *bvec_kmap_local(struct bio_vec *bvec)
{
return kmap_local_page(bvec->bv_page) + bvec->bv_offset;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memcpy_from_bvec(char *to, struct bio_vec *bvec)
{
memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memcpy_to_bvec(struct bio_vec *bvec, const char *from)
{
memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, bvec->bv_len);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memzero_bvec(struct bio_vec *bvec)
{
memzero_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *bvec_virt(struct bio_vec *bvec)
{
({ int __ret_warn_on = !!(PageHighMem(bvec->bv_page)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/bvec.h"), "i" (240), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
return lowmem_page_address(bvec->bv_page) + bvec->bv_offset;
}
# 18 "./include/linux/skbuff.h" 2






# 1 "./arch/riscv/include/generated/uapi/asm/types.h" 1
# 25 "./include/linux/skbuff.h" 2


# 1 "./include/linux/textsearch.h" 1
# 11 "./include/linux/textsearch.h"
struct module;

struct ts_config;
# 23 "./include/linux/textsearch.h"
struct ts_state
{
unsigned int offset;
char cb[48];
};
# 39 "./include/linux/textsearch.h"
struct ts_ops
{
const char *name;
struct ts_config * (*init)(const void *, unsigned int, gfp_t, int);
unsigned int (*find)(struct ts_config *,
struct ts_state *);
void (*destroy)(struct ts_config *);
void * (*get_pattern)(struct ts_config *);
unsigned int (*get_pattern_len)(struct ts_config *);
struct module *owner;
struct list_head list;
};
# 59 "./include/linux/textsearch.h"
struct ts_config
{
struct ts_ops *ops;
int flags;
# 76 "./include/linux/textsearch.h"
unsigned int (*get_next_block)(unsigned int consumed,
const u8 **dst,
struct ts_config *conf,
struct ts_state *state);
# 89 "./include/linux/textsearch.h"
void (*finish)(struct ts_config *conf,
struct ts_state *state);
};
# 105 "./include/linux/textsearch.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int textsearch_next(struct ts_config *conf,
struct ts_state *state)
{
unsigned int ret = conf->ops->find(conf, state);

if (conf->finish)
conf->finish(conf, state);

return ret;
}
# 124 "./include/linux/textsearch.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int textsearch_find(struct ts_config *conf,
struct ts_state *state)
{
state->offset = 0;
return textsearch_next(conf, state);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *textsearch_get_pattern(struct ts_config *conf)
{
return conf->ops->get_pattern(conf);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int textsearch_get_pattern_len(struct ts_config *conf)
{
return conf->ops->get_pattern_len(conf);
}

extern int textsearch_register(struct ts_ops *);
extern int textsearch_unregister(struct ts_ops *);
extern struct ts_config *textsearch_prepare(const char *, const void *,
unsigned int, gfp_t, int);
extern void textsearch_destroy(struct ts_config *conf);
extern unsigned int textsearch_find_continuous(struct ts_config *,
struct ts_state *,
const void *, unsigned int);





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct ts_config *alloc_ts_config(size_t payload,
gfp_t gfp_mask)
{
struct ts_config *conf;

conf = kzalloc((((sizeof(*conf)) + 8 -1) & ~(8 -1)) + payload, gfp_mask);
if (conf == ((void *)0))
return ERR_PTR(-12);

return conf;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *ts_config_priv(struct ts_config *conf)
{
return ((u8 *) conf + (((sizeof(struct ts_config)) + 8 -1) & ~(8 -1)));
}
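/*
 * alloc_ts_config() and ts_config_priv() pair up: the search
 * algorithm's private state lives in the same allocation, directly
 * after the 8-byte-aligned struct ts_config. ERR_PTR(-12) is
 * ERR_PTR(-ENOMEM) with the errno constant expanded.
 */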
# 28 "./include/linux/skbuff.h" 2
# 1 "./include/net/checksum.h" 1
# 19 "./include/net/checksum.h"
# 1 "./arch/riscv/include/generated/uapi/asm/types.h" 1
# 20 "./include/net/checksum.h" 2


# 1 "./arch/riscv/include/generated/asm/checksum.h" 1
# 1 "./include/asm-generic/checksum.h" 1
# 17 "./include/asm-generic/checksum.h"
extern __wsum csum_partial(const void *buff, int len, __wsum sum);






extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __sum16 csum_fold(__wsum csum)
{
u32 sum = ( u32)csum;
sum = (sum & 0xffff) + (sum >> 16);
sum = (sum & 0xffff) + (sum >> 16);
return ( __sum16)~sum;
}
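/*
 * csum_fold() above is the classic end-around-carry fold: two rounds
 * of (sum & 0xffff) + (sum >> 16) reduce the 32-bit partial sum to 16
 * bits (the second round absorbs any carry from the first), and the
 * final complement yields the Internet checksum.
 */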







extern __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
__u8 proto, __wsum sum);



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
__u8 proto, __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}






extern __sum16 ip_compute_csum(const void *buff, int len);
# 2 "./arch/riscv/include/generated/asm/checksum.h" 2
# 23 "./include/net/checksum.h" 2


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__))
__wsum csum_and_copy_from_user (const void *src, void *dst,
int len)
{
if (copy_from_user(dst, src, len))
return 0;
return csum_partial(dst, len, ~0U);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __wsum csum_and_copy_to_user
(const void *src, void *dst, int len)
{
__wsum sum = csum_partial(src, len, ~0U);

if (copy_to_user(dst, src, len) == 0)
return sum;
return 0;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
memcpy(dst, src, len);
return csum_partial(dst, len, 0);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __wsum csum_add(__wsum csum, __wsum addend)
{
u32 res = ( u32)csum;
res += ( u32)addend;
return ( __wsum)(res + (res < ( u32)addend));
}
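/*
 * csum_add() is a one's-complement add: (res < addend) evaluates to 1
 * exactly when the 32-bit addition wrapped, re-injecting the carry at
 * the bottom as the checksum arithmetic requires.
 */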


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __wsum csum_sub(__wsum csum, __wsum addend)
{
return csum_add(csum, ~addend);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __sum16 csum16_add(__sum16 csum, __be16 addend)
{
u16 res = ( u16)csum;

res += ( u16)addend;
return ( __sum16)(res + (res < ( u16)addend));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __sum16 csum16_sub(__sum16 csum, __be16 addend)
{
return csum16_add(csum, ~addend);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __wsum csum_shift(__wsum sum, int offset)
{

if (offset & 1)
return ( __wsum)ror32(( u32)sum, 8);
return sum;
}
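/*
 * csum_shift() relies on a property of the one's-complement sum: data
 * starting at an odd offset contributes a byte-swapped sum, so an
 * 8-bit rotate (ror32) realigns it before csum_block_add() merges it.
 */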


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __wsum
csum_block_add(__wsum csum, __wsum csum2, int offset)
{
return csum_add(csum, csum_shift(csum2, offset));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __wsum
csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
{
return csum_block_add(csum, csum2, offset);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __wsum
csum_block_sub(__wsum csum, __wsum csum2, int offset)
{
return csum_block_add(csum, ~csum2, offset);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __wsum csum_unfold(__sum16 n)
{
return ( __wsum)n;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__))
__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
{
return csum_partial(buff, len, sum);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void csum_replace_by_diff(__sum16 *sum, __wsum diff)
{
*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
__wsum tmp = csum_sub(~csum_unfold(*sum), ( __wsum)from);

*sum = csum_fold(csum_add(tmp, ( __wsum)to));
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
{
*sum = ~csum16_add(csum16_sub(~(*sum), old), new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void csum_replace(__wsum *csum, __wsum old, __wsum new)
{
*csum = csum_add(csum_sub(*csum, old), new);
}

struct sk_buff;
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
__be32 from, __be32 to, bool pseudohdr);
void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
const __be32 *from, const __be32 *to,
bool pseudohdr);
void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
__wsum diff, bool pseudohdr);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__))
void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
__be16 from, __be16 to, bool pseudohdr)
{
inet_proto_csum_replace4(sum, skb, ( __be32)from,
( __be32)to, pseudohdr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __wsum remcsum_adjust(void *ptr, __wsum csum,
int start, int offset)
{
__sum16 *psum = (__sum16 *)(ptr + offset);
__wsum delta;


csum = csum_sub(csum, csum_partial(ptr, start, 0));


delta = csum_sub(( __wsum)csum_fold(csum),
( __wsum)*psum);
*psum = csum_fold(csum);

return delta;
}
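/*
 * remcsum_adjust() implements remote checksum offload: it subtracts
 * the partial checksum of the first `start` bytes, stores the folded
 * result into the checksum field at `offset`, and returns the delta
 * the caller needs to patch the outer checksum.
 */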

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void remcsum_unadjust(__sum16 *psum, __wsum delta)
{
*psum = csum_fold(csum_sub(delta, ( __wsum)*psum));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __wsum wsum_negate(__wsum val)
{
return ( __wsum)-(( u32)val);
}
# 29 "./include/linux/skbuff.h" 2


# 1 "./include/linux/dma-mapping.h" 1






# 1 "./include/linux/device.h" 1
# 15 "./include/linux/device.h"
# 1 "./include/linux/dev_printk.h" 1
# 22 "./include/linux/dev_printk.h"
struct device;




struct dev_printk_info {
char subsystem[16];
char device[48];
};



__attribute__((__format__(printf, 3, 0))) __attribute__((__cold__))
int dev_vprintk_emit(int level, const struct device *dev,
const char *fmt, va_list args);
__attribute__((__format__(printf, 3, 4))) __attribute__((__cold__))
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);

__attribute__((__format__(printf, 3, 4))) __attribute__((__cold__))
void _dev_printk(const char *level, const struct device *dev,
const char *fmt, ...);
__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__))
void _dev_emerg(const struct device *dev, const char *fmt, ...);
__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__))
void _dev_alert(const struct device *dev, const char *fmt, ...);
__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__))
void _dev_crit(const struct device *dev, const char *fmt, ...);
__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__))
void _dev_err(const struct device *dev, const char *fmt, ...);
__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__))
void _dev_warn(const struct device *dev, const char *fmt, ...);
__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__))
void _dev_notice(const struct device *dev, const char *fmt, ...);
__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__))
void _dev_info(const struct device *dev, const char *fmt, ...);
# 16 "./include/linux/device.h" 2
# 1 "./include/linux/energy_model.h" 1




# 1 "./include/linux/device.h" 1
# 6 "./include/linux/energy_model.h" 2



# 1 "./include/linux/sched/cpufreq.h" 1
# 10 "./include/linux/energy_model.h" 2
# 1 "./include/linux/sched/topology.h" 1






# 1 "./include/linux/sched/idle.h" 1






enum cpu_idle_type {
CPU_IDLE,
CPU_NOT_IDLE,
CPU_NEWLY_IDLE,
CPU_MAX_IDLE_TYPES
};


extern void wake_up_if_idle(int cpu);
# 63 "./include/linux/sched/idle.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __current_set_polling(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __current_clr_polling(void) { }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __attribute__((__warn_unused_result__)) current_set_polling_and_test(void)
{
return __builtin_expect(!!(test_ti_thread_flag(((struct thread_info *)get_current()), 3)), 0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __attribute__((__warn_unused_result__)) current_clr_polling_and_test(void)
{
return __builtin_expect(!!(test_ti_thread_flag(((struct thread_info *)get_current()), 3)), 0);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void current_clr_polling(void)
{
__current_clr_polling();







do { do { } while (0); __asm__ __volatile__ ("fence " "rw" "," "rw" : : : "memory"); } while (0);

do { if (test_ti_thread_flag(((struct thread_info *)get_current()), 3)) set_preempt_need_resched(); } while (0);
}
# 8 "./include/linux/sched/topology.h" 2








enum {

# 1 "./include/linux/sched/sd_flags.h" 1
# 51 "./include/linux/sched/sd_flags.h"
__SD_BALANCE_NEWIDLE,







__SD_BALANCE_EXEC,







__SD_BALANCE_FORK,







__SD_BALANCE_WAKE,






__SD_WAKE_AFFINE,
# 91 "./include/linux/sched/sd_flags.h"
__SD_ASYM_CPUCAPACITY,
# 101 "./include/linux/sched/sd_flags.h"
__SD_ASYM_CPUCAPACITY_FULL,
# 110 "./include/linux/sched/sd_flags.h"
__SD_SHARE_CPUCAPACITY,
# 119 "./include/linux/sched/sd_flags.h"
__SD_SHARE_PKG_RESOURCES,
# 130 "./include/linux/sched/sd_flags.h"
__SD_SERIALIZE,
# 140 "./include/linux/sched/sd_flags.h"
__SD_ASYM_PACKING,
# 150 "./include/linux/sched/sd_flags.h"
__SD_PREFER_SIBLING,







__SD_OVERLAP,







__SD_NUMA,
# 18 "./include/linux/sched/topology.h" 2
__SD_FLAG_CNT,
};



enum {

# 1 "./include/linux/sched/sd_flags.h" 1
# 51 "./include/linux/sched/sd_flags.h"
SD_BALANCE_NEWIDLE = 1 << __SD_BALANCE_NEWIDLE,







SD_BALANCE_EXEC = 1 << __SD_BALANCE_EXEC,







SD_BALANCE_FORK = 1 << __SD_BALANCE_FORK,







SD_BALANCE_WAKE = 1 << __SD_BALANCE_WAKE,






SD_WAKE_AFFINE = 1 << __SD_WAKE_AFFINE,
# 91 "./include/linux/sched/sd_flags.h"
SD_ASYM_CPUCAPACITY = 1 << __SD_ASYM_CPUCAPACITY,
# 101 "./include/linux/sched/sd_flags.h"
SD_ASYM_CPUCAPACITY_FULL = 1 << __SD_ASYM_CPUCAPACITY_FULL,
# 110 "./include/linux/sched/sd_flags.h"
SD_SHARE_CPUCAPACITY = 1 << __SD_SHARE_CPUCAPACITY,
# 119 "./include/linux/sched/sd_flags.h"
SD_SHARE_PKG_RESOURCES = 1 << __SD_SHARE_PKG_RESOURCES,
# 130 "./include/linux/sched/sd_flags.h"
SD_SERIALIZE = 1 << __SD_SERIALIZE,
# 140 "./include/linux/sched/sd_flags.h"
SD_ASYM_PACKING = 1 << __SD_ASYM_PACKING,
# 150 "./include/linux/sched/sd_flags.h"
SD_PREFER_SIBLING = 1 << __SD_PREFER_SIBLING,







SD_OVERLAP = 1 << __SD_OVERLAP,







SD_NUMA = 1 << __SD_NUMA,
# 25 "./include/linux/sched/topology.h" 2
};




struct sd_flag_debug {
unsigned int meta_flags;
char *name;
};
extern const struct sd_flag_debug sd_flag_debug[];
# 66 "./include/linux/sched/topology.h"
extern int arch_asym_cpu_priority(int cpu);

struct sched_domain_attr {
int relax_domain_level;
};





extern int sched_domain_level_max;

struct sched_group;

struct sched_domain_shared {
atomic_t ref;
atomic_t nr_busy_cpus;
int has_idle_cores;
};

struct sched_domain {

struct sched_domain *parent;
struct sched_domain *child;
struct sched_group *groups;
unsigned long min_interval;
unsigned long max_interval;
unsigned int busy_factor;
unsigned int imbalance_pct;
unsigned int cache_nice_tries;
unsigned int imb_numa_nr;

int nohz_idle;
int flags;
int level;


unsigned long last_balance;
unsigned int balance_interval;
unsigned int nr_balance_failed;


u64 max_newidle_lb_cost;
unsigned long last_decay_max_lb_cost;

u64 avg_scan_cost;



unsigned int lb_count[CPU_MAX_IDLE_TYPES];
unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];


unsigned int alb_count;
unsigned int alb_failed;
unsigned int alb_pushed;


unsigned int sbe_count;
unsigned int sbe_balanced;
unsigned int sbe_pushed;


unsigned int sbf_count;
unsigned int sbf_balanced;
unsigned int sbf_pushed;


unsigned int ttwu_wake_remote;
unsigned int ttwu_move_affine;
unsigned int ttwu_move_balance;


char *name;

union {
void *private;
struct callback_head rcu;
};
struct sched_domain_shared *shared;

unsigned int span_weight;







unsigned long span[];
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct cpumask *sched_domain_span(struct sched_domain *sd)
{
return ((struct cpumask *)(1 ? (sd->span) : (void *)sizeof(__check_is_bitmap(sd->span))));
}
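/*
 * sched_domain_span() above is to_cpumask() expanded: the ternary is
 * a zero-cost compile-time check (via __check_is_bitmap) that
 * sd->span really is a bitmap.
 */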

extern void partition_sched_domains_locked(int ndoms_new,
cpumask_var_t doms_new[],
struct sched_domain_attr *dattr_new);

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
struct sched_domain_attr *dattr_new);


cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

bool cpus_share_cache(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);



struct sd_data {
struct sched_domain * *sd;
struct sched_domain_shared * *sds;
struct sched_group * *sg;
struct sched_group_capacity * *sgc;
};

struct sched_domain_topology_level {
sched_domain_mask_f mask;
sched_domain_flags_f sd_flags;
int flags;
int numa_level;
struct sd_data data;

char *name;

};

extern void set_sched_topology(struct sched_domain_topology_level *tl);
# 239 "./include/linux/sched/topology.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rebuild_sched_domains_energy(void)
{
}
# 255 "./include/linux/sched/topology.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__))
unsigned long arch_scale_cpu_capacity(int cpu)
{
return (1L << 10);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__))
unsigned long arch_scale_thermal_pressure(int cpu)
{
return 0;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__))
void arch_update_thermal_pressure(const struct cpumask *cpus,
unsigned long capped_frequency)
{ }


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int task_node(const struct task_struct *p)
{
return ((void)(task_cpu(p)),0);
}
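/* With NUMA disabled in this config, cpu_to_node() expands to
 * ((void)(cpu), 0): task_cpu(p) is evaluated only to keep the expression
 * well-formed, and every task is reported as node 0. */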
# 11 "./include/linux/energy_model.h" 2
# 22 "./include/linux/energy_model.h"
struct em_perf_state {
unsigned long frequency;
unsigned long power;
unsigned long cost;
unsigned long flags;
};
# 55 "./include/linux/energy_model.h"
struct em_perf_domain {
struct em_perf_state *table;
int nr_perf_states;
unsigned long flags;
unsigned long cpus[];
};
# 266 "./include/linux/energy_model.h"
struct em_data_callback {};



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
struct em_data_callback *cb, cpumask_t *span,
bool milliwatts)
{
return -22;
}
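/* -22 is -EINVAL: with CONFIG_ENERGY_MODEL disabled, this stub reports that
 * no energy model can be registered. */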
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void em_dev_unregister_perf_domain(struct device *dev)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct em_perf_domain *em_cpu_get(int cpu)
{
return ((void *)0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct em_perf_domain *em_pd_get(struct device *dev)
{
return ((void *)0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long em_cpu_energy(struct em_perf_domain *pd,
unsigned long max_util, unsigned long sum_util,
unsigned long allowed_cpu_cap)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
return 0;
}
# 17 "./include/linux/device.h" 2
# 1 "./include/linux/ioport.h" 1
# 21 "./include/linux/ioport.h"
struct resource {
resource_size_t start;
resource_size_t end;
const char *name;
unsigned long flags;
unsigned long desc;
struct resource *parent, *sibling, *child;
};
# 134 "./include/linux/ioport.h"
enum {
IORES_DESC_NONE = 0,
IORES_DESC_CRASH_KERNEL = 1,
IORES_DESC_ACPI_TABLES = 2,
IORES_DESC_ACPI_NV_STORAGE = 3,
IORES_DESC_PERSISTENT_MEMORY = 4,
IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
IORES_DESC_DEVICE_PRIVATE_MEMORY = 6,
IORES_DESC_RESERVED = 7,
IORES_DESC_SOFT_RESERVED = 8,
};




enum {
IORES_MAP_SYSTEM_RAM = ((((1UL))) << (0)),
IORES_MAP_ENCRYPTED = ((((1UL))) << (1)),
};
# 185 "./include/linux/ioport.h"
extern struct resource ioport_resource;
extern struct resource iomem_resource;

extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
extern int request_resource(struct resource *root, struct resource *new);
extern int release_resource(struct resource *new);
void release_child_resources(struct resource *new);
extern void reserve_region_with_split(struct resource *root,
resource_size_t start, resource_size_t end,
const char *name);
extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
extern int insert_resource(struct resource *parent, struct resource *new);
extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
extern int remove_resource(struct resource *old);
extern void arch_remove_reservations(struct resource *avail);
extern int allocate_resource(struct resource *root, struct resource *new,
resource_size_t size, resource_size_t min,
resource_size_t max, resource_size_t align,
resource_size_t (*alignf)(void *,
const struct resource *,
resource_size_t,
resource_size_t),
void *alignf_data);
struct resource *lookup_resource(struct resource *root, resource_size_t start);
int adjust_resource(struct resource *res, resource_size_t start,
resource_size_t size);
resource_size_t resource_alignment(struct resource *res);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) resource_size_t resource_size(const struct resource *res)
{
return res->end - res->start + 1;
}
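/* A resource's end address is inclusive, hence the +1 when computing its size. */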
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long resource_type(const struct resource *res)
{
return res->flags & 0x00001f00;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long resource_ext_type(const struct resource *res)
{
return res->flags & 0x01000000;
}
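/* The masks above are the expansions of IORESOURCE_TYPE_BITS (0x00001f00)
 * and IORESOURCE_EXT_TYPE_BITS (0x01000000). */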

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool resource_contains(struct resource *r1, struct resource *r2)
{
if (resource_type(r1) != resource_type(r2))
return false;
if (r1->flags & 0x20000000 || r2->flags & 0x20000000)
return false;
return r1->start <= r2->start && r1->end >= r2->end;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool resource_overlaps(struct resource *r1, struct resource *r2)
{
return r1->start <= r2->end && r1->end >= r2->start;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
resource_intersection(struct resource *r1, struct resource *r2, struct resource *r)
{
if (!resource_overlaps(r1, r2))
return false;
r->start = __builtin_choose_expr(((!!(sizeof((typeof(r1->start) *)1 == (typeof(r2->start) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(r1->start) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(r2->start) * 0l)) : (int *)8))))), ((r1->start) > (r2->start) ? (r1->start) : (r2->start)), ({ typeof(r1->start) __UNIQUE_ID___x244 = (r1->start); typeof(r2->start) __UNIQUE_ID___y245 = (r2->start); ((__UNIQUE_ID___x244) > (__UNIQUE_ID___y245) ? (__UNIQUE_ID___x244) : (__UNIQUE_ID___y245)); }));
r->end = __builtin_choose_expr(((!!(sizeof((typeof(r1->end) *)1 == (typeof(r2->end) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(r1->end) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(r2->end) * 0l)) : (int *)8))))), ((r1->end) < (r2->end) ? (r1->end) : (r2->end)), ({ typeof(r1->end) __UNIQUE_ID___x246 = (r1->end); typeof(r2->end) __UNIQUE_ID___y247 = (r2->end); ((__UNIQUE_ID___x246) < (__UNIQUE_ID___y247) ? (__UNIQUE_ID___x246) : (__UNIQUE_ID___y247)); }));
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
resource_union(struct resource *r1, struct resource *r2, struct resource *r)
{
if (!resource_overlaps(r1, r2))
return false;
r->start = __builtin_choose_expr(((!!(sizeof((typeof(r1->start) *)1 == (typeof(r2->start) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(r1->start) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(r2->start) * 0l)) : (int *)8))))), ((r1->start) < (r2->start) ? (r1->start) : (r2->start)), ({ typeof(r1->start) __UNIQUE_ID___x248 = (r1->start); typeof(r2->start) __UNIQUE_ID___y249 = (r2->start); ((__UNIQUE_ID___x248) < (__UNIQUE_ID___y249) ? (__UNIQUE_ID___x248) : (__UNIQUE_ID___y249)); }));
r->end = __builtin_choose_expr(((!!(sizeof((typeof(r1->end) *)1 == (typeof(r2->end) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(r1->end) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(r2->end) * 0l)) : (int *)8))))), ((r1->end) > (r2->end) ? (r1->end) : (r2->end)), ({ typeof(r1->end) __UNIQUE_ID___x250 = (r1->end); typeof(r2->end) __UNIQUE_ID___y251 = (r2->end); ((__UNIQUE_ID___x250) > (__UNIQUE_ID___y251) ? (__UNIQUE_ID___x250) : (__UNIQUE_ID___y251)); }));
return true;
}
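/* The __builtin_choose_expr() expressions in resource_intersection() and
 * resource_union() above are expansions of the kernel's min()/max() macros:
 * the `8 ? (void *)((long)(x) * 0l) : (int *)8` test is __is_constexpr(), so
 * the first arm is used when both operands are integer constant expressions,
 * and the statement-expression arm otherwise evaluates each operand exactly
 * once into a __UNIQUE_ID temporary. */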
# 271 "./include/linux/ioport.h"
extern struct resource * __request_region(struct resource *,
resource_size_t start,
resource_size_t n,
const char *name, int flags);





extern void __release_region(struct resource *, resource_size_t,
resource_size_t);
# 290 "./include/linux/ioport.h"
struct device;

extern int devm_request_resource(struct device *dev, struct resource *root,
struct resource *new);
extern void devm_release_resource(struct device *dev, struct resource *new);






extern struct resource * __devm_request_region(struct device *dev,
struct resource *parent, resource_size_t start,
resource_size_t n, const char *name);






extern void __devm_release_region(struct device *dev, struct resource *parent,
resource_size_t start, resource_size_t n);
extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
extern bool iomem_is_exclusive(u64 addr);

extern int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
void *arg, int (*func)(unsigned long, unsigned long, void *));
extern int
walk_mem_res(u64 start, u64 end, void *arg,
int (*func)(struct resource *, void *));
extern int
walk_system_ram_res(u64 start, u64 end, void *arg,
int (*func)(struct resource *, void *));
extern int
walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
void *arg, int (*func)(struct resource *, void *));

struct resource *devm_request_free_mem_region(struct device *dev,
struct resource *base, unsigned long size);
struct resource *request_free_mem_region(struct resource *base,
unsigned long size, const char *name);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void irqresource_disabled(struct resource *res, u32 irq)
{
res->start = irq;
res->end = irq;
res->flags |= 0x00000400 | 0x10000000 | 0x20000000;
}
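/* The flag bits are IORESOURCE_IRQ (0x400) | IORESOURCE_DISABLED
 * (0x10000000) | IORESOURCE_UNSET (0x20000000): the resource records the
 * IRQ number but is marked disabled and unassigned. */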

extern struct address_space *iomem_get_mapping(void);
# 18 "./include/linux/device.h" 2

# 1 "./include/linux/klist.h" 1
# 17 "./include/linux/klist.h"
struct klist_node;
struct klist {
spinlock_t k_lock;
struct list_head k_list;
void (*get)(struct klist_node *);
void (*put)(struct klist_node *);
} __attribute__ ((aligned (sizeof(void *))));
# 34 "./include/linux/klist.h"
extern void klist_init(struct klist *k, void (*get)(struct klist_node *),
void (*put)(struct klist_node *));

struct klist_node {
void *n_klist;
struct list_head n_node;
struct kref n_ref;
};

extern void klist_add_tail(struct klist_node *n, struct klist *k);
extern void klist_add_head(struct klist_node *n, struct klist *k);
extern void klist_add_behind(struct klist_node *n, struct klist_node *pos);
extern void klist_add_before(struct klist_node *n, struct klist_node *pos);

extern void klist_del(struct klist_node *n);
extern void klist_remove(struct klist_node *n);

extern int klist_node_attached(struct klist_node *n);


struct klist_iter {
struct klist *i_klist;
struct klist_node *i_cur;
};


extern void klist_iter_init(struct klist *k, struct klist_iter *i);
extern void klist_iter_init_node(struct klist *k, struct klist_iter *i,
struct klist_node *n);
extern void klist_iter_exit(struct klist_iter *i);
extern struct klist_node *klist_prev(struct klist_iter *i);
extern struct klist_node *klist_next(struct klist_iter *i);
# 20 "./include/linux/device.h" 2





# 1 "./include/linux/pm.h" 1
# 23 "./include/linux/pm.h"
extern void (*pm_power_off)(void);
extern void (*pm_power_off_prepare)(void);

struct device;




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pm_vt_switch_required(struct device *dev, bool required)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pm_vt_switch_unregister(struct device *dev)
{
}
# 45 "./include/linux/pm.h"
extern const char power_group_name[];




typedef struct pm_message {
int event;
} pm_message_t;
# 278 "./include/linux/pm.h"
struct dev_pm_ops {
int (*prepare)(struct device *dev);
void (*complete)(struct device *dev);
int (*suspend)(struct device *dev);
int (*resume)(struct device *dev);
int (*freeze)(struct device *dev);
int (*thaw)(struct device *dev);
int (*poweroff)(struct device *dev);
int (*restore)(struct device *dev);
int (*suspend_late)(struct device *dev);
int (*resume_early)(struct device *dev);
int (*freeze_late)(struct device *dev);
int (*thaw_early)(struct device *dev);
int (*poweroff_late)(struct device *dev);
int (*restore_early)(struct device *dev);
int (*suspend_noirq)(struct device *dev);
int (*resume_noirq)(struct device *dev);
int (*freeze_noirq)(struct device *dev);
int (*thaw_noirq)(struct device *dev);
int (*poweroff_noirq)(struct device *dev);
int (*restore_noirq)(struct device *dev);
int (*runtime_suspend)(struct device *dev);
int (*runtime_resume)(struct device *dev);
int (*runtime_idle)(struct device *dev);
};
# 548 "./include/linux/pm.h"
enum rpm_status {
RPM_INVALID = -1,
RPM_ACTIVE = 0,
RPM_RESUMING,
RPM_SUSPENDED,
RPM_SUSPENDING,
};
# 571 "./include/linux/pm.h"
enum rpm_request {
RPM_REQ_NONE = 0,
RPM_REQ_IDLE,
RPM_REQ_SUSPEND,
RPM_REQ_AUTOSUSPEND,
RPM_REQ_RESUME,
};

struct wakeup_source;
struct wake_irq;
struct pm_domain_data;

struct pm_subsys_data {
spinlock_t lock;
unsigned int refcount;

unsigned int clock_op_might_sleep;
struct mutex clock_mutex;
struct list_head clock_list;


struct pm_domain_data *domain_data;

};
# 614 "./include/linux/pm.h"
struct dev_pm_info {
pm_message_t power_state;
unsigned int can_wakeup:1;
unsigned int async_suspend:1;
bool in_dpm_list:1;
bool is_prepared:1;
bool is_suspended:1;
bool is_noirq_suspended:1;
bool is_late_suspended:1;
bool no_pm:1;
bool early_init:1;
bool direct_complete:1;
u32 driver_flags;
spinlock_t lock;
# 638 "./include/linux/pm.h"
unsigned int should_wakeup:1;


struct hrtimer suspend_timer;
u64 timer_expires;
struct work_struct work;
wait_queue_head_t wait_queue;
struct wake_irq *wakeirq;
atomic_t usage_count;
atomic_t child_count;
unsigned int disable_depth:3;
unsigned int idle_notification:1;
unsigned int request_pending:1;
unsigned int deferred_resume:1;
unsigned int needs_force_resume:1;
unsigned int runtime_auto:1;
bool ignore_children:1;
unsigned int no_callbacks:1;
unsigned int irq_safe:1;
unsigned int use_autosuspend:1;
unsigned int timer_autosuspends:1;
unsigned int memalloc_noio:1;
unsigned int links_count;
enum rpm_request request;
enum rpm_status runtime_status;
enum rpm_status last_status;
int runtime_error;
int autosuspend_delay;
u64 last_busy;
u64 active_time;
u64 suspended_time;
u64 accounting_timestamp;

struct pm_subsys_data *subsys_data;
void (*set_latency_tolerance)(struct device *, s32);
struct dev_pm_qos *qos;
};

extern int dev_pm_get_subsys_data(struct device *dev);
extern void dev_pm_put_subsys_data(struct device *dev);
# 693 "./include/linux/pm.h"
struct dev_pm_domain {
struct dev_pm_ops ops;
int (*start)(struct device *dev);
void (*detach)(struct device *dev, bool power_off);
int (*activate)(struct device *dev);
void (*sync)(struct device *dev);
void (*dismiss)(struct device *dev);
};
# 812 "./include/linux/pm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dpm_suspend_start(pm_message_t state)
{
return 0;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int device_pm_wait_for_dev(struct device *a, struct device *b)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
}
# 851 "./include/linux/pm.h"
enum dpm_order {
DPM_ORDER_NONE,
DPM_ORDER_DEV_AFTER_PARENT,
DPM_ORDER_PARENT_BEFORE_DEV,
DPM_ORDER_DEV_LAST,
};
# 26 "./include/linux/device.h" 2




# 1 "./include/linux/device/bus.h" 1
# 21 "./include/linux/device/bus.h"
struct device_driver;
struct fwnode_handle;
# 82 "./include/linux/device/bus.h"
struct bus_type {
const char *name;
const char *dev_name;
struct device *dev_root;
const struct attribute_group **bus_groups;
const struct attribute_group **dev_groups;
const struct attribute_group **drv_groups;

int (*match)(struct device *dev, struct device_driver *drv);
int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
int (*probe)(struct device *dev);
void (*sync_state)(struct device *dev);
void (*remove)(struct device *dev);
void (*shutdown)(struct device *dev);

int (*online)(struct device *dev);
int (*offline)(struct device *dev);

int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);

int (*num_vf)(struct device *dev);

int (*dma_configure)(struct device *dev);

const struct dev_pm_ops *pm;

const struct iommu_ops *iommu_ops;

struct subsys_private *p;
struct lock_class_key lock_key;

bool need_parent_lock;
};

extern int __attribute__((__warn_unused_result__)) bus_register(struct bus_type *bus);

extern void bus_unregister(struct bus_type *bus);

extern int __attribute__((__warn_unused_result__)) bus_rescan_devices(struct bus_type *bus);

struct bus_attribute {
struct attribute attr;
ssize_t (*show)(struct bus_type *bus, char *buf);
ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
};
# 136 "./include/linux/device/bus.h"
extern int __attribute__((__warn_unused_result__)) bus_create_file(struct bus_type *,
struct bus_attribute *);
extern void bus_remove_file(struct bus_type *, struct bus_attribute *);


int device_match_name(struct device *dev, const void *name);
int device_match_of_node(struct device *dev, const void *np);
int device_match_fwnode(struct device *dev, const void *fwnode);
int device_match_devt(struct device *dev, const void *pdevt);
int device_match_acpi_dev(struct device *dev, const void *adev);
int device_match_acpi_handle(struct device *dev, const void *handle);
int device_match_any(struct device *dev, const void *unused);


struct subsys_dev_iter {
struct klist_iter ki;
const struct device_type *type;
};
void subsys_dev_iter_init(struct subsys_dev_iter *iter,
struct bus_type *subsys,
struct device *start,
const struct device_type *type);
struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
void subsys_dev_iter_exit(struct subsys_dev_iter *iter);

int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
int (*fn)(struct device *dev, void *data));
struct device *bus_find_device(struct bus_type *bus, struct device *start,
const void *data,
int (*match)(struct device *dev, const void *data));







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct device *bus_find_device_by_name(struct bus_type *bus,
struct device *start,
const char *name)
{
return bus_find_device(bus, start, name, device_match_name);
}
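/* Each bus_find_device_by_*() wrapper below simply pairs bus_find_device()
 * with the corresponding device_match_*() predicate declared earlier. */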







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct device *
bus_find_device_by_of_node(struct bus_type *bus, const struct device_node *np)
{
return bus_find_device(bus, ((void *)0), np, device_match_of_node);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct device *
bus_find_device_by_fwnode(struct bus_type *bus, const struct fwnode_handle *fwnode)
{
return bus_find_device(bus, ((void *)0), fwnode, device_match_fwnode);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct device *bus_find_device_by_devt(struct bus_type *bus,
dev_t devt)
{
return bus_find_device(bus, ((void *)0), &devt, device_match_devt);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct device *
bus_find_next_device(struct bus_type *bus, struct device *cur)
{
return bus_find_device(bus, cur, ((void *)0), device_match_any);
}
# 243 "./include/linux/device/bus.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct device *
bus_find_device_by_acpi_dev(struct bus_type *bus, const void *adev)
{
return ((void *)0);
}


struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
struct device *hint);
int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
void *data, int (*fn)(struct device_driver *, void *));
void bus_sort_breadthfirst(struct bus_type *bus,
int (*compare)(const struct device *a,
const struct device *b));






struct notifier_block;

extern int bus_register_notifier(struct bus_type *bus,
struct notifier_block *nb);
extern int bus_unregister_notifier(struct bus_type *bus,
struct notifier_block *nb);
# 286 "./include/linux/device/bus.h"
extern struct kset *bus_get_kset(struct bus_type *bus);
extern struct klist *bus_get_device_klist(struct bus_type *bus);
# 31 "./include/linux/device.h" 2
# 1 "./include/linux/device/class.h" 1
# 22 "./include/linux/device/class.h"
struct device;
struct fwnode_handle;
# 54 "./include/linux/device/class.h"
struct class {
const char *name;
struct module *owner;

const struct attribute_group **class_groups;
const struct attribute_group **dev_groups;
struct kobject *dev_kobj;

int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
char *(*devnode)(struct device *dev, umode_t *mode);

void (*class_release)(struct class *class);
void (*dev_release)(struct device *dev);

int (*shutdown_pre)(struct device *dev);

const struct kobj_ns_type_operations *ns_type;
const void *(*namespace)(struct device *dev);

void (*get_ownership)(struct device *dev, kuid_t *uid, kgid_t *gid);

const struct dev_pm_ops *pm;

struct subsys_private *p;
};

struct class_dev_iter {
struct klist_iter ki;
const struct device_type *type;
};

extern struct kobject *sysfs_dev_block_kobj;
extern struct kobject *sysfs_dev_char_kobj;
extern int __attribute__((__warn_unused_result__)) __class_register(struct class *class,
struct lock_class_key *key);
extern void class_unregister(struct class *class);
# 99 "./include/linux/device/class.h"
struct class_compat;
struct class_compat *class_compat_register(const char *name);
void class_compat_unregister(struct class_compat *cls);
int class_compat_create_link(struct class_compat *cls, struct device *dev,
struct device *device_link);
void class_compat_remove_link(struct class_compat *cls, struct device *dev,
struct device *device_link);

extern void class_dev_iter_init(struct class_dev_iter *iter,
struct class *class,
struct device *start,
const struct device_type *type);
extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
extern void class_dev_iter_exit(struct class_dev_iter *iter);

extern int class_for_each_device(struct class *class, struct device *start,
void *data,
int (*fn)(struct device *dev, void *data));
extern struct device *class_find_device(struct class *class,
struct device *start, const void *data,
int (*match)(struct device *, const void *));







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct device *class_find_device_by_name(struct class *class,
const char *name)
{
return class_find_device(class, ((void *)0), name, device_match_name);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct device *
class_find_device_by_of_node(struct class *class, const struct device_node *np)
{
return class_find_device(class, ((void *)0), np, device_match_of_node);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct device *
class_find_device_by_fwnode(struct class *class,
const struct fwnode_handle *fwnode)
{
return class_find_device(class, ((void *)0), fwnode, device_match_fwnode);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct device *class_find_device_by_devt(struct class *class,
dev_t devt)
{
return class_find_device(class, ((void *)0), &devt, device_match_devt);
}
# 184 "./include/linux/device/class.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct device *
class_find_device_by_acpi_dev(struct class *class, const void *adev)
{
return ((void *)0);
}


struct class_attribute {
struct attribute attr;
ssize_t (*show)(struct class *class, struct class_attribute *attr,
char *buf);
ssize_t (*store)(struct class *class, struct class_attribute *attr,
const char *buf, size_t count);
};
# 206 "./include/linux/device/class.h"
extern int __attribute__((__warn_unused_result__)) class_create_file_ns(struct class *class,
const struct class_attribute *attr,
const void *ns);
extern void class_remove_file_ns(struct class *class,
const struct class_attribute *attr,
const void *ns);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) class_create_file(struct class *class,
const struct class_attribute *attr)
{
return class_create_file_ns(class, attr, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void class_remove_file(struct class *class,
const struct class_attribute *attr)
{
return class_remove_file_ns(class, attr, ((void *)0));
}


struct class_attribute_string {
struct class_attribute attr;
char *str;
};
# 238 "./include/linux/device/class.h"
extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
char *buf);

struct class_interface {
struct list_head node;
struct class *class;

int (*add_dev) (struct device *, struct class_interface *);
void (*remove_dev) (struct device *, struct class_interface *);
};

extern int __attribute__((__warn_unused_result__)) class_interface_register(struct class_interface *);
extern void class_interface_unregister(struct class_interface *);

extern struct class * __attribute__((__warn_unused_result__)) __class_create(struct module *owner,
const char *name,
struct lock_class_key *key);
extern void class_destroy(struct class *cls);
# 32 "./include/linux/device.h" 2
# 1 "./include/linux/device/driver.h" 1
# 21 "./include/linux/device/driver.h"
# 1 "./include/linux/module.h" 1
# 14 "./include/linux/module.h"
# 1 "./include/linux/buildid.h" 1








int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
__u32 *size);
int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size);


extern unsigned char vmlinux_build_id[20];
void init_vmlinux_build_id(void);
# 15 "./include/linux/module.h" 2


# 1 "./include/linux/kmod.h" 1








# 1 "./include/linux/umh.h" 1
# 11 "./include/linux/umh.h"
struct cred;
struct file;






struct subprocess_info {
struct work_struct work;
struct completion *complete;
const char *path;
char **argv;
char **envp;
int wait;
int retval;
int (*init)(struct subprocess_info *info, struct cred *new);
void (*cleanup)(struct subprocess_info *info);
void *data;
};

extern int
call_usermodehelper(const char *path, char **argv, char **envp, int wait);

extern struct subprocess_info *
call_usermodehelper_setup(const char *path, char **argv, char **envp,
gfp_t gfp_mask,
int (*init)(struct subprocess_info *info, struct cred *new),
void (*cleanup)(struct subprocess_info *), void *data);

extern int
call_usermodehelper_exec(struct subprocess_info *info, int wait);

extern struct ctl_table usermodehelper_table[];

enum umh_disable_depth {
UMH_ENABLED = 0,
UMH_FREEZING,
UMH_DISABLED,
};

extern int __usermodehelper_disable(enum umh_disable_depth depth);
extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int usermodehelper_disable(void)
{
return __usermodehelper_disable(UMH_DISABLED);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void usermodehelper_enable(void)
{
__usermodehelper_set_disable_depth(UMH_ENABLED);
}

extern int usermodehelper_read_trylock(void);
extern long usermodehelper_read_lock_wait(long timeout);
extern void usermodehelper_read_unlock(void);
# 10 "./include/linux/kmod.h" 2
# 20 "./include/linux/kmod.h"
extern char modprobe_path[];


extern __attribute__((__format__(printf, 2, 3)))
int __request_module(bool wait, const char *name, ...);
# 18 "./include/linux/module.h" 2

# 1 "./include/linux/elf.h" 1





# 1 "./arch/riscv/include/asm/elf.h" 1
# 11 "./arch/riscv/include/asm/elf.h"
# 1 "./arch/riscv/include/uapi/asm/elf.h" 1
# 18 "./arch/riscv/include/uapi/asm/elf.h"
typedef unsigned long elf_greg_t;
typedef struct user_regs_struct elf_gregset_t;



typedef __u64 elf_fpreg_t;
typedef union __riscv_fp_state elf_fpregset_t;
# 12 "./arch/riscv/include/asm/elf.h" 2


# 1 "./arch/riscv/include/asm/cacheinfo.h" 1








# 1 "./include/linux/cacheinfo.h" 1








struct device_node;
struct attribute;

enum cache_type {
CACHE_TYPE_NOCACHE = 0,
CACHE_TYPE_INST = ((((1UL))) << (0)),
CACHE_TYPE_DATA = ((((1UL))) << (1)),
CACHE_TYPE_SEPARATE = CACHE_TYPE_INST | CACHE_TYPE_DATA,
CACHE_TYPE_UNIFIED = ((((1UL))) << (2)),
};

extern unsigned int coherency_max_size;
# 49 "./include/linux/cacheinfo.h"
struct cacheinfo {
unsigned int id;
enum cache_type type;
unsigned int level;
unsigned int coherency_line_size;
unsigned int number_of_sets;
unsigned int ways_of_associativity;
unsigned int physical_line_partition;
unsigned int size;
cpumask_t shared_cpu_map;
unsigned int attributes;
# 69 "./include/linux/cacheinfo.h"
void *fw_token;
bool disable_sysfs;
void *priv;
};

struct cpu_cacheinfo {
struct cacheinfo *info_list;
unsigned int num_levels;
unsigned int num_leaves;
bool cpu_map_populated;
};

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
int init_cache_level(unsigned int cpu);
int populate_cache_leaves(unsigned int cpu);
int cache_setup_acpi(unsigned int cpu);
# 94 "./include/linux/cacheinfo.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int acpi_find_last_cache_level(unsigned int cpu)
{
return 0;
}




const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int get_cpu_cacheinfo_id(int cpu, int level)
{
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
int i;

for (i = 0; i < ci->num_leaves; i++) {
if (ci->info_list[i].level == level) {
if (ci->info_list[i].attributes & ((((1UL))) << (4)))
return ci->info_list[i].id;
return -1;
}
}

return -1;
}
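/* BIT(4) in the attributes field is likely CACHE_ID: only leaves that expose
 * a valid id at the requested level report it; everything else returns -1. */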
# 10 "./arch/riscv/include/asm/cacheinfo.h" 2

struct riscv_cacheinfo_ops {
const struct attribute_group * (*get_priv_group)(struct cacheinfo
*this_leaf);
};

void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops);
uintptr_t get_cache_size(u32 level, enum cache_type type);
uintptr_t get_cache_geometry(u32 level, enum cache_type type);
# 15 "./arch/riscv/include/asm/elf.h" 2
# 54 "./arch/riscv/include/asm/elf.h"
extern unsigned long elf_hwcap;
# 82 "./arch/riscv/include/asm/elf.h"
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
# 7 "./include/linux/elf.h" 2
# 1 "./include/uapi/linux/elf.h" 1





# 1 "./include/uapi/linux/elf-em.h" 1
# 7 "./include/uapi/linux/elf.h" 2


typedef __u32 Elf32_Addr;
typedef __u16 Elf32_Half;
typedef __u32 Elf32_Off;
typedef __s32 Elf32_Sword;
typedef __u32 Elf32_Word;


typedef __u64 Elf64_Addr;
typedef __u16 Elf64_Half;
typedef __s16 Elf64_SHalf;
typedef __u64 Elf64_Off;
typedef __s32 Elf64_Sword;
typedef __u32 Elf64_Word;
typedef __u64 Elf64_Xword;
typedef __s64 Elf64_Sxword;
# 143 "./include/uapi/linux/elf.h"
typedef struct dynamic {
Elf32_Sword d_tag;
union{
Elf32_Sword d_val;
Elf32_Addr d_ptr;
} d_un;
} Elf32_Dyn;

typedef struct {
Elf64_Sxword d_tag;
union {
Elf64_Xword d_val;
Elf64_Addr d_ptr;
} d_un;
} Elf64_Dyn;
# 166 "./include/uapi/linux/elf.h"
typedef struct elf32_rel {
Elf32_Addr r_offset;
Elf32_Word r_info;
} Elf32_Rel;

typedef struct elf64_rel {
Elf64_Addr r_offset;
Elf64_Xword r_info;
} Elf64_Rel;

typedef struct elf32_rela {
Elf32_Addr r_offset;
Elf32_Word r_info;
Elf32_Sword r_addend;
} Elf32_Rela;

typedef struct elf64_rela {
Elf64_Addr r_offset;
Elf64_Xword r_info;
Elf64_Sxword r_addend;
} Elf64_Rela;

typedef struct elf32_sym {
Elf32_Word st_name;
Elf32_Addr st_value;
Elf32_Word st_size;
unsigned char st_info;
unsigned char st_other;
Elf32_Half st_shndx;
} Elf32_Sym;

typedef struct elf64_sym {
Elf64_Word st_name;
unsigned char st_info;
unsigned char st_other;
Elf64_Half st_shndx;
Elf64_Addr st_value;
Elf64_Xword st_size;
} Elf64_Sym;




typedef struct elf32_hdr {
unsigned char e_ident[16];
Elf32_Half e_type;
Elf32_Half e_machine;
Elf32_Word e_version;
Elf32_Addr e_entry;
Elf32_Off e_phoff;
Elf32_Off e_shoff;
Elf32_Word e_flags;
Elf32_Half e_ehsize;
Elf32_Half e_phentsize;
Elf32_Half e_phnum;
Elf32_Half e_shentsize;
Elf32_Half e_shnum;
Elf32_Half e_shstrndx;
} Elf32_Ehdr;

typedef struct elf64_hdr {
unsigned char e_ident[16];
Elf64_Half e_type;
Elf64_Half e_machine;
Elf64_Word e_version;
Elf64_Addr e_entry;
Elf64_Off e_phoff;
Elf64_Off e_shoff;
Elf64_Word e_flags;
Elf64_Half e_ehsize;
Elf64_Half e_phentsize;
Elf64_Half e_phnum;
Elf64_Half e_shentsize;
Elf64_Half e_shnum;
Elf64_Half e_shstrndx;
} Elf64_Ehdr;







typedef struct elf32_phdr{
Elf32_Word p_type;
Elf32_Off p_offset;
Elf32_Addr p_vaddr;
Elf32_Addr p_paddr;
Elf32_Word p_filesz;
Elf32_Word p_memsz;
Elf32_Word p_flags;
Elf32_Word p_align;
} Elf32_Phdr;

typedef struct elf64_phdr {
Elf64_Word p_type;
Elf64_Word p_flags;
Elf64_Off p_offset;
Elf64_Addr p_vaddr;
Elf64_Addr p_paddr;
Elf64_Xword p_filesz;
Elf64_Xword p_memsz;
Elf64_Xword p_align;
} Elf64_Phdr;
# 308 "./include/uapi/linux/elf.h"
typedef struct elf32_shdr {
Elf32_Word sh_name;
Elf32_Word sh_type;
Elf32_Word sh_flags;
Elf32_Addr sh_addr;
Elf32_Off sh_offset;
Elf32_Word sh_size;
Elf32_Word sh_link;
Elf32_Word sh_info;
Elf32_Word sh_addralign;
Elf32_Word sh_entsize;
} Elf32_Shdr;

typedef struct elf64_shdr {
Elf64_Word sh_name;
Elf64_Word sh_type;
Elf64_Xword sh_flags;
Elf64_Addr sh_addr;
Elf64_Off sh_offset;
Elf64_Xword sh_size;
Elf64_Word sh_link;
Elf64_Word sh_info;
Elf64_Xword sh_addralign;
Elf64_Xword sh_entsize;
} Elf64_Shdr;
# 444 "./include/uapi/linux/elf.h"
typedef struct elf32_note {
Elf32_Word n_namesz;
Elf32_Word n_descsz;
Elf32_Word n_type;
} Elf32_Nhdr;


typedef struct elf64_note {
Elf64_Word n_namesz;
Elf64_Word n_descsz;
Elf64_Word n_type;
} Elf64_Nhdr;
# 8 "./include/linux/elf.h" 2
# 52 "./include/linux/elf.h"
extern Elf64_Dyn _DYNAMIC[];
# 65 "./include/linux/elf.h"
struct file;
struct coredump_params;


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int elf_coredump_extra_notes_size(void) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int elf_coredump_extra_notes_write(struct coredump_params *cprm) { return 0; }
# 81 "./include/linux/elf.h"
struct gnu_property {
u32 pr_type;
u32 pr_datasz;
};

struct arch_elf_state;


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int arch_parse_elf_property(u32 type, const void *data,
size_t datasz, bool compat,
struct arch_elf_state *arch)
{
return 0;
}
# 104 "./include/linux/elf.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int arch_elf_adjust_prot(int prot,
const struct arch_elf_state *state,
bool has_interp, bool is_interp)
{
return prot;
}
# 20 "./include/linux/module.h" 2


# 1 "./include/linux/moduleparam.h" 1
# 36 "./include/linux/moduleparam.h"
struct kernel_param;






enum {
KERNEL_PARAM_OPS_FL_NOARG = (1 << 0)
};

struct kernel_param_ops {

unsigned int flags;

int (*set)(const char *val, const struct kernel_param *kp);

int (*get)(char *buffer, const struct kernel_param *kp);

void (*free)(void *arg);
};







enum {
KERNEL_PARAM_FL_UNSAFE = (1 << 0),
KERNEL_PARAM_FL_HWPARAM = (1 << 1),
};

struct kernel_param {
const char *name;
struct module *mod;
const struct kernel_param_ops *ops;
const u16 perm;
s8 level;
u8 flags;
union {
void *arg;
const struct kparam_string *str;
const struct kparam_array *arr;
};
};

extern const struct kernel_param __start___param[], __stop___param[];


struct kparam_string {
unsigned int maxlen;
char *string;
};


struct kparam_array
{
unsigned int max;
unsigned int elemsize;
unsigned int *num;
const struct kernel_param_ops *ops;
void *elem;
};
# 304 "./include/linux/moduleparam.h"
extern void kernel_param_lock(struct module *mod);
extern void kernel_param_unlock(struct module *mod);
# 372 "./include/linux/moduleparam.h"
extern bool parameq(const char *name1, const char *name2);
# 382 "./include/linux/moduleparam.h"
extern bool parameqn(const char *name1, const char *name2, size_t n);


extern char *parse_args(const char *name,
char *args,
const struct kernel_param *params,
unsigned num,
s16 level_min,
s16 level_max,
void *arg,
int (*unknown)(char *param, char *val,
const char *doing, void *arg));



extern void destroy_params(const struct kernel_param *params, unsigned num);
# 411 "./include/linux/moduleparam.h"
extern const struct kernel_param_ops param_ops_byte;
extern int param_set_byte(const char *val, const struct kernel_param *kp);
extern int param_get_byte(char *buffer, const struct kernel_param *kp);


extern const struct kernel_param_ops param_ops_short;
extern int param_set_short(const char *val, const struct kernel_param *kp);
extern int param_get_short(char *buffer, const struct kernel_param *kp);


extern const struct kernel_param_ops param_ops_ushort;
extern int param_set_ushort(const char *val, const struct kernel_param *kp);
extern int param_get_ushort(char *buffer, const struct kernel_param *kp);


extern const struct kernel_param_ops param_ops_int;
extern int param_set_int(const char *val, const struct kernel_param *kp);
extern int param_get_int(char *buffer, const struct kernel_param *kp);


extern const struct kernel_param_ops param_ops_uint;
extern int param_set_uint(const char *val, const struct kernel_param *kp);
extern int param_get_uint(char *buffer, const struct kernel_param *kp);
int param_set_uint_minmax(const char *val, const struct kernel_param *kp,
unsigned int min, unsigned int max);


extern const struct kernel_param_ops param_ops_long;
extern int param_set_long(const char *val, const struct kernel_param *kp);
extern int param_get_long(char *buffer, const struct kernel_param *kp);


extern const struct kernel_param_ops param_ops_ulong;
extern int param_set_ulong(const char *val, const struct kernel_param *kp);
extern int param_get_ulong(char *buffer, const struct kernel_param *kp);


extern const struct kernel_param_ops param_ops_ullong;
extern int param_set_ullong(const char *val, const struct kernel_param *kp);
extern int param_get_ullong(char *buffer, const struct kernel_param *kp);


extern const struct kernel_param_ops param_ops_hexint;
extern int param_set_hexint(const char *val, const struct kernel_param *kp);
extern int param_get_hexint(char *buffer, const struct kernel_param *kp);


extern const struct kernel_param_ops param_ops_charp;
extern int param_set_charp(const char *val, const struct kernel_param *kp);
extern int param_get_charp(char *buffer, const struct kernel_param *kp);
extern void param_free_charp(void *arg);



extern const struct kernel_param_ops param_ops_bool;
extern int param_set_bool(const char *val, const struct kernel_param *kp);
extern int param_get_bool(char *buffer, const struct kernel_param *kp);


extern const struct kernel_param_ops param_ops_bool_enable_only;
extern int param_set_bool_enable_only(const char *val,
const struct kernel_param *kp);



extern const struct kernel_param_ops param_ops_invbool;
extern int param_set_invbool(const char *val, const struct kernel_param *kp);
extern int param_get_invbool(char *buffer, const struct kernel_param *kp);



extern const struct kernel_param_ops param_ops_bint;
extern int param_set_bint(const char *val, const struct kernel_param *kp);
# 526 "./include/linux/moduleparam.h"
enum hwparam_type {
hwparam_ioport,
hwparam_iomem,
hwparam_ioport_or_iomem,
hwparam_irq,
hwparam_dma,
hwparam_dma_addr,
hwparam_other,
};
# 587 "./include/linux/moduleparam.h"
extern const struct kernel_param_ops param_array_ops;

extern const struct kernel_param_ops param_ops_string;
extern int param_set_copystring(const char *val, const struct kernel_param *);
extern int param_get_string(char *buffer, const struct kernel_param *kp);



struct module;


extern int module_param_sysfs_setup(struct module *mod,
const struct kernel_param *kparam,
unsigned int num_params);

extern void module_param_sysfs_remove(struct module *mod);
# 23 "./include/linux/module.h" 2


# 1 "./include/linux/rbtree_latch.h" 1
# 40 "./include/linux/rbtree_latch.h"
struct latch_tree_node {
struct rb_node node[2];
};

struct latch_tree_root {
seqcount_latch_t seq;
struct rb_root tree[2];
};
# 64 "./include/linux/rbtree_latch.h"
struct latch_tree_ops {
bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b);
int (*comp)(void *key, struct latch_tree_node *b);
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) struct latch_tree_node *
__lt_from_rb(struct rb_node *node, int idx)
{
return ({ void *__mptr = (void *)(node); _Static_assert(__builtin_types_compatible_p(typeof(*(node)), typeof(((struct latch_tree_node *)0)->node[idx])) || __builtin_types_compatible_p(typeof(*(node)), typeof(void)), "pointer type mismatch in container_of()"); ((struct latch_tree_node *)(__mptr - __builtin_offsetof(struct latch_tree_node, node[idx]))); });
}
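/* The statement expression above is the expansion of container_of(): it
 * recovers the enclosing latch_tree_node from a pointer to node[idx], with
 * a _Static_assert guarding the member type. */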

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
__lt_insert(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx,
bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b))
{
struct rb_root *root = &ltr->tree[idx];
struct rb_node **link = &root->rb_node;
struct rb_node *node = &ltn->node[idx];
struct rb_node *parent = ((void *)0);
struct latch_tree_node *ltp;

while (*link) {
parent = *link;
ltp = __lt_from_rb(parent, idx);

if (less(ltn, ltp))
link = &parent->rb_left;
else
link = &parent->rb_right;
}

rb_link_node_rcu(node, parent, link);
rb_insert_color(node, root);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
__lt_erase(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx)
{
rb_erase(&ltn->node[idx], &ltr->tree[idx]);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) struct latch_tree_node *
__lt_find(void *key, struct latch_tree_root *ltr, int idx,
int (*comp)(void *key, struct latch_tree_node *node))
{
struct rb_node *node = ({ typeof(ltr->tree[idx].rb_node) __UNIQUE_ID_rcu252 = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_253(void) ; if (!((sizeof(ltr->tree[idx].rb_node) == sizeof(char) || sizeof(ltr->tree[idx].rb_node) == sizeof(short) || sizeof(ltr->tree[idx].rb_node) == sizeof(int) || sizeof(ltr->tree[idx].rb_node) == sizeof(long)) || sizeof(ltr->tree[idx].rb_node) == sizeof(long long))) __compiletime_assert_253(); } while (0); (*(const volatile typeof( _Generic((ltr->tree[idx].rb_node), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (ltr->tree[idx].rb_node))) *)&(ltr->tree[idx].rb_node)); }); ((typeof(*ltr->tree[idx].rb_node) *)(__UNIQUE_ID_rcu252)); });
struct latch_tree_node *ltn;
int c;

while (node) {
ltn = __lt_from_rb(node, idx);
c = comp(key, ltn);

if (c < 0)
node = ({ typeof(node->rb_left) __UNIQUE_ID_rcu254 = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_255(void) ; if (!((sizeof(node->rb_left) == sizeof(char) || sizeof(node->rb_left) == sizeof(short) || sizeof(node->rb_left) == sizeof(int) || sizeof(node->rb_left) == sizeof(long)) || sizeof(node->rb_left) == sizeof(long long))) __compiletime_assert_255(); } while (0); (*(const volatile typeof( _Generic((node->rb_left), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (node->rb_left))) *)&(node->rb_left)); }); ((typeof(*node->rb_left) *)(__UNIQUE_ID_rcu254)); });
else if (c > 0)
node = ({ typeof(node->rb_right) __UNIQUE_ID_rcu256 = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_257(void) ; if (!((sizeof(node->rb_right) == sizeof(char) || sizeof(node->rb_right) == sizeof(short) || sizeof(node->rb_right) == sizeof(int) || sizeof(node->rb_right) == sizeof(long)) || sizeof(node->rb_right) == sizeof(long long))) __compiletime_assert_257(); } while (0); (*(const volatile typeof( _Generic((node->rb_right), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (node->rb_right))) *)&(node->rb_right)); }); ((typeof(*node->rb_right) *)(__UNIQUE_ID_rcu256)); });
else
return ltn;
}

return ((void *)0);
}
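/* The large inline expressions in __lt_find() are expansions of
 * rcu_dereference_raw()/READ_ONCE(): the __compiletime_assert checks that
 * the loaded object is at most word-sized, and the volatile _Generic cast
 * forces a single tear-free load of each rb_node pointer. */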
# 143 "./include/linux/rbtree_latch.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
latch_tree_insert(struct latch_tree_node *node,
struct latch_tree_root *root,
const struct latch_tree_ops *ops)
{
raw_write_seqcount_latch(&root->seq);
__lt_insert(node, root, 0, ops->less);
raw_write_seqcount_latch(&root->seq);
__lt_insert(node, root, 1, ops->less);
}
# 170 "./include/linux/rbtree_latch.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
latch_tree_erase(struct latch_tree_node *node,
struct latch_tree_root *root,
const struct latch_tree_ops *ops)
{
raw_write_seqcount_latch(&root->seq);
__lt_erase(node, root, 0);
raw_write_seqcount_latch(&root->seq);
__lt_erase(node, root, 1);
}
# 199 "./include/linux/rbtree_latch.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) struct latch_tree_node *
latch_tree_find(void *key, struct latch_tree_root *root,
const struct latch_tree_ops *ops)
{
struct latch_tree_node *node;
unsigned int seq;

do {
seq = raw_read_seqcount_latch(&root->seq);
node = __lt_find(key, root, seq & 1, ops->comp);
} while (read_seqcount_latch_retry(&root->seq, seq));

return node;
}
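/* Classic latch technique: writers bump root->seq around updates to each of
 * the two tree copies, and readers use seq & 1 to pick the stable copy,
 * retrying the lookup if the sequence changed underneath them. */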
# 26 "./include/linux/module.h" 2
# 1 "./include/linux/error-injection.h" 1





# 1 "./include/asm-generic/error-injection.h" 1





enum {
EI_ETYPE_NONE,
EI_ETYPE_NULL,
EI_ETYPE_ERRNO,
EI_ETYPE_ERRNO_NULL,
EI_ETYPE_TRUE,
};

struct error_injection_entry {
unsigned long addr;
int etype;
};

struct pt_regs;
# 34 "./include/asm-generic/error-injection.h"
void override_function_with_return(struct pt_regs *regs);
# 7 "./include/linux/error-injection.h" 2



extern bool within_error_injection_list(unsigned long addr);
extern int get_injectable_error_type(unsigned long addr);
# 27 "./include/linux/module.h" 2



# 1 "./include/linux/cfi.h" 1
# 31 "./include/linux/module.h" 2


# 1 "./arch/riscv/include/asm/module.h" 1






# 1 "./include/asm-generic/module.h" 1
# 8 "./arch/riscv/include/asm/module.h" 2

struct module;
unsigned long module_emit_got_entry(struct module *mod, unsigned long val);
unsigned long module_emit_plt_entry(struct module *mod, unsigned long val);


struct mod_section {
Elf64_Shdr *shdr;
int num_entries;
int max_entries;
};

struct mod_arch_specific {
struct mod_section got;
struct mod_section plt;
struct mod_section got_plt;
};

struct got_entry {
unsigned long symbol_addr;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct got_entry emit_got_entry(unsigned long val)
{
return (struct got_entry) {val};
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct got_entry *get_got_entry(unsigned long val,
const struct mod_section *sec)
{
struct got_entry *got = (struct got_entry *)(sec->shdr->sh_addr);
int i;
for (i = 0; i < sec->num_entries; i++) {
if (got[i].symbol_addr == val)
return &got[i];
}
return ((void *)0);
}

struct plt_entry {




u32 insn_auipc;
u32 insn_ld;
u32 insn_jr;
};







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct plt_entry emit_plt_entry(unsigned long val,
unsigned long plt,
unsigned long got_plt)
{
# 79 "./arch/riscv/include/asm/module.h"
unsigned long offset = got_plt - plt;
u32 hi20 = (offset + 0x800) & 0xfffff000;
u32 lo12 = (offset - hi20);
return (struct plt_entry) {
0x0017 | (0x5 << 7) | hi20,
0x3003 | (lo12 << 20) | (0x5 << 15) | (0x6 << 7),
0x0067 | (0x6 << 15)
};
}
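/* PLT entry encoding: 0x17 is the auipc opcode, 0x3003 is ld, 0x67 is jalr,
 * so each entry is "auipc t0, hi20; ld t1, lo12(t0); jr t1" with the rd/rs
 * fields set to x5 (t0) and x6 (t1). Worth noting for this thread's topic:
 * these module PLT stubs use only the t0/t1 temporaries as scratch, not t2. */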

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int get_got_plt_idx(unsigned long val, const struct mod_section *sec)
{
struct got_entry *got_plt = (struct got_entry *)sec->shdr->sh_addr;
int i;
for (i = 0; i < sec->num_entries; i++) {
if (got_plt[i].symbol_addr == val)
return i;
}
return -1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct plt_entry *get_plt_entry(unsigned long val,
const struct mod_section *sec_plt,
const struct mod_section *sec_got_plt)
{
struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr;
int got_plt_idx = get_got_plt_idx(val, sec_got_plt);
if (got_plt_idx >= 0)
return plt + got_plt_idx;
else
return ((void *)0);
}
# 34 "./include/linux/module.h" 2



struct modversion_info {
unsigned long crc;
char name[(64 - sizeof(unsigned long))];
};

struct module;
struct exception_table_entry;

struct module_kobject {
struct kobject kobj;
struct module *mod;
struct kobject *drivers_dir;
struct module_param_attrs *mp;
struct completion *kobj_completion;
};

struct module_attribute {
struct attribute attr;
ssize_t (*show)(struct module_attribute *, struct module_kobject *,
char *);
ssize_t (*store)(struct module_attribute *, struct module_kobject *,
const char *, size_t count);
void (*setup)(struct module *, const char *);
int (*test)(struct module *);
void (*free)(struct module *);
};

struct module_version_attribute {
struct module_attribute mattr;
const char *module_name;
const char *version;
};

extern ssize_t __modver_version_show(struct module_attribute *,
struct module_kobject *, char *);

extern struct module_attribute module_uevent;


extern int init_module(void);
extern void cleanup_module(void);
# 296 "./include/linux/module.h"
struct notifier_block;



extern int modules_disabled;

void *__symbol_get(const char *symbol);
void *__symbol_get_gpl(const char *symbol);



struct module_use {
struct list_head source_list;
struct list_head target_list;
struct module *source, *target;
};

enum module_state {
MODULE_STATE_LIVE,
MODULE_STATE_COMING,
MODULE_STATE_GOING,
MODULE_STATE_UNFORMED,
};

struct mod_tree_node {
struct module *mod;
struct latch_tree_node node;
};

struct module_layout {

void *base;

unsigned int size;

unsigned int text_size;

unsigned int ro_size;

unsigned int ro_after_init_size;


struct mod_tree_node mtn;

};
# 349 "./include/linux/module.h"
struct mod_kallsyms {
Elf64_Sym *symtab;
unsigned int num_symtab;
char *strtab;
char *typetab;
};
# 365 "./include/linux/module.h"
struct module {
enum module_state state;


struct list_head list;


char name[(64 - sizeof(unsigned long))];







struct module_kobject mkobj;
struct module_attribute *modinfo_attrs;
const char *version;
const char *srcversion;
struct kobject *holders_dir;


const struct kernel_symbol *syms;
const s32 *crcs;
unsigned int num_syms;







struct mutex param_lock;

struct kernel_param *kp;
unsigned int num_kp;


unsigned int num_gpl_syms;
const struct kernel_symbol *gpl_syms;
const s32 *gpl_crcs;
bool using_gplonly_symbols;






bool async_probe_requested;


unsigned int num_exentries;
struct exception_table_entry *extable;


int (*init)(void);


struct module_layout core_layout __attribute__((__aligned__((1 << 6))));
struct module_layout init_layout;


struct mod_arch_specific arch;

unsigned long taints;



unsigned num_bugs;
struct list_head bug_list;
struct bug_entry *bug_table;




struct mod_kallsyms *kallsyms;
struct mod_kallsyms core_kallsyms;


struct module_sect_attrs *sect_attrs;


struct module_notes_attrs *notes_attrs;




char *args;



void *percpu;
unsigned int percpu_size;

void *noinstr_text_start;
unsigned int noinstr_text_size;


unsigned int num_tracepoints;
tracepoint_ptr_t *tracepoints_ptrs;


unsigned int num_srcu_structs;
struct srcu_struct **srcu_struct_ptrs;


unsigned int num_bpf_raw_events;
struct bpf_raw_event_map *bpf_raw_events;
# 483 "./include/linux/module.h"
unsigned int num_trace_bprintk_fmt;
const char **trace_bprintk_fmt_start;


struct trace_event_call **trace_events;
unsigned int num_trace_events;
struct trace_eval_map **trace_evals;
unsigned int num_trace_evals;


unsigned int num_ftrace_callsites;
unsigned long *ftrace_callsites;


void *kprobes_text_start;
unsigned int kprobes_text_size;
unsigned long *kprobe_blacklist;
unsigned int num_kprobe_blacklist;
# 522 "./include/linux/module.h"
struct list_head source_list;

struct list_head target_list;


void (*exit)(void);

atomic_t refcnt;
# 539 "./include/linux/module.h"
struct error_injection_entry *ei_funcs;
unsigned int num_ei_funcs;

} __attribute__((__aligned__((1 << 6)))) ;





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long kallsyms_symbol_value(const Elf64_Sym *sym)
{
return sym->st_value;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool module_is_live(struct module *mod)
{
return mod->state != MODULE_STATE_GOING;
}

struct module *__module_text_address(unsigned long addr);
struct module *__module_address(unsigned long addr);
bool is_module_address(unsigned long addr);
bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr);
bool is_module_percpu_address(unsigned long addr);
bool is_module_text_address(unsigned long addr);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool within_module_core(unsigned long addr,
const struct module *mod)
{
return (unsigned long)mod->core_layout.base <= addr &&
addr < (unsigned long)mod->core_layout.base + mod->core_layout.size;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool within_module_init(unsigned long addr,
const struct module *mod)
{
return (unsigned long)mod->init_layout.base <= addr &&
addr < (unsigned long)mod->init_layout.base + mod->init_layout.size;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool within_module(unsigned long addr, const struct module *mod)
{
return within_module_init(addr, mod) || within_module_core(addr, mod);
}


struct module *find_module(const char *name);



int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *name, char *module_name, int *exported);


unsigned long module_kallsyms_lookup_name(const char *name);

extern void __attribute__((__noreturn__)) __module_put_and_kthread_exit(struct module *mod,
long code);



int module_refcount(struct module *mod);
void __symbol_put(const char *symbol);

void symbol_put_addr(void *addr);



extern void __module_get(struct module *module);



extern bool try_module_get(struct module *module);

extern void module_put(struct module *module);
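/*
 * Typical reference counting with the declarations above, as an
 * illustrative sketch only (mod is assumed to be a valid pointer):
 *
 *	if (try_module_get(mod)) {
 *		...use the module; it cannot be unloaded here...
 *		module_put(mod);
 *	}
 *
 * try_module_get() fails once the module has begun unloading.
 */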
# 643 "./include/linux/module.h"
void *dereference_module_function_descriptor(struct module *mod, void *ptr);




const char *module_address_lookup(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset,
char **modname, const unsigned char **modbuildid,
char *namebuf);
int lookup_module_symbol_name(unsigned long addr, char *symname);
int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);

int register_module_notifier(struct notifier_block *nb);
int unregister_module_notifier(struct notifier_block *nb);

extern void print_modules(void);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool module_requested_async_probing(struct module *module)
{
return module && module->async_probe_requested;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_livepatch_module(struct module *mod)
{
return false;
}


bool is_module_sig_enforced(void);
void set_module_sig_enforced(void);
# 824 "./include/linux/module.h"
extern struct kset *module_kset;
extern struct kobj_type module_ktype;
extern int module_sysfs_initialized;
# 836 "./include/linux/module.h"
void module_bug_finalize(const Elf64_Ehdr *, const Elf64_Shdr *,
struct module *);
void module_bug_cleanup(struct module *);
# 853 "./include/linux/module.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool retpoline_module_ok(bool has_retpoline)
{
return true;
}
# 865 "./include/linux/module.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool module_sig_ok(struct module *module)
{
return true;
}


int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
struct module *, unsigned long),
void *data);
# 22 "./include/linux/device/driver.h" 2
# 45 "./include/linux/device/driver.h"
enum probe_type {
PROBE_DEFAULT_STRATEGY,
PROBE_PREFER_ASYNCHRONOUS,
PROBE_FORCE_SYNCHRONOUS,
};
# 96 "./include/linux/device/driver.h"
struct device_driver {
const char *name;
struct bus_type *bus;

struct module *owner;
const char *mod_name;

bool suppress_bind_attrs;
enum probe_type probe_type;

const struct of_device_id *of_match_table;
const struct acpi_device_id *acpi_match_table;

int (*probe) (struct device *dev);
void (*sync_state)(struct device *dev);
int (*remove) (struct device *dev);
void (*shutdown) (struct device *dev);
int (*suspend) (struct device *dev, pm_message_t state);
int (*resume) (struct device *dev);
const struct attribute_group **groups;
const struct attribute_group **dev_groups;

const struct dev_pm_ops *pm;
void (*coredump) (struct device *dev);

struct driver_private *p;
};
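/*
 * struct device_driver is the driver core's view of a driver: the bus it
 * attaches to, its OF/ACPI match tables, and the probe/remove/shutdown
 * and power-management callbacks the core invokes. An illustrative
 * initialization sketch (all names hypothetical):
 *
 *	static struct device_driver my_drv = {
 *		.name  = "my_drv",
 *		.owner = THIS_MODULE,
 *		.probe = my_probe,
 *	};
 *
 *	ret = driver_register(&my_drv);
 */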


extern int __attribute__((__warn_unused_result__)) driver_register(struct device_driver *drv);
extern void driver_unregister(struct device_driver *drv);

extern struct device_driver *driver_find(const char *name,
struct bus_type *bus);
extern int driver_probe_done(void);
extern void wait_for_device_probe(void);



struct driver_attribute {
struct attribute attr;
ssize_t (*show)(struct device_driver *driver, char *buf);
ssize_t (*store)(struct device_driver *driver, const char *buf,
size_t count);
};
# 149 "./include/linux/device/driver.h"
extern int __attribute__((__warn_unused_result__)) driver_create_file(struct device_driver *driver,
const struct driver_attribute *attr);
extern void driver_remove_file(struct device_driver *driver,
const struct driver_attribute *attr);

extern int __attribute__((__warn_unused_result__)) driver_for_each_device(struct device_driver *drv,
struct device *start,
void *data,
int (*fn)(struct device *dev,
void *));
struct device *driver_find_device(struct device_driver *drv,
struct device *start, const void *data,
int (*match)(struct device *dev, const void *data));

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct device *driver_find_device_by_name(struct device_driver *drv,
const char *name)
{
return driver_find_device(drv, ((void *)0), name, device_match_name);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct device *
driver_find_device_by_of_node(struct device_driver *drv,
const struct device_node *np)
{
return driver_find_device(drv, ((void *)0), np, device_match_of_node);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct device *
driver_find_device_by_fwnode(struct device_driver *drv,
const struct fwnode_handle *fwnode)
{
return driver_find_device(drv, ((void *)0), fwnode, device_match_fwnode);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct device *driver_find_device_by_devt(struct device_driver *drv,
dev_t devt)
{
return driver_find_device(drv, ((void *)0), &devt, device_match_devt);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct device *driver_find_next_device(struct device_driver *drv,
struct device *start)
{
return driver_find_device(drv, start, ((void *)0), device_match_any);
}
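/*
 * The driver_find_device_by_*() helpers above are thin wrappers around
 * driver_find_device() with a stock match function (device_match_name,
 * device_match_of_node, and so on). The returned device carries a
 * raised reference count, so the caller must drop it; an illustrative
 * sketch (drv assumed valid, the name hypothetical):
 *
 *	struct device *dev = driver_find_device_by_name(drv, "uart0");
 *	if (dev) {
 *		...use dev...
 *		put_device(dev);
 *	}
 */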
# 233 "./include/linux/device/driver.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct device *
driver_find_device_by_acpi_dev(struct device_driver *drv, const void *adev)
{
return ((void *)0);
}


extern int driver_deferred_probe_timeout;
void driver_deferred_probe_add(struct device *dev);
int driver_deferred_probe_check_state(struct device *dev);
void driver_init(void);
# 33 "./include/linux/device.h" 2
# 1 "./arch/riscv/include/generated/asm/device.h" 1
# 1 "./include/asm-generic/device.h" 1







struct dev_archdata {
};

struct pdev_archdata {
};
# 2 "./arch/riscv/include/generated/asm/device.h" 2
# 34 "./include/linux/device.h" 2

struct device;
struct device_private;
struct device_driver;
struct driver_private;
struct module;
struct class;
struct subsys_private;
struct device_node;
struct fwnode_handle;
struct iommu_ops;
struct iommu_group;
struct dev_pin_info;
struct dev_iommu;
struct msi_device_data;
# 63 "./include/linux/device.h"
struct subsys_interface {
const char *name;
struct bus_type *subsys;
struct list_head node;
int (*add_dev)(struct device *dev, struct subsys_interface *sif);
void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
};

int subsys_interface_register(struct subsys_interface *sif);
void subsys_interface_unregister(struct subsys_interface *sif);

int subsys_system_register(struct bus_type *subsys,
const struct attribute_group **groups);
int subsys_virtual_register(struct bus_type *subsys,
const struct attribute_group **groups);
# 88 "./include/linux/device.h"
struct device_type {
const char *name;
const struct attribute_group **groups;
int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
char *(*devnode)(struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid);
void (*release)(struct device *dev);

const struct dev_pm_ops *pm;
};


struct device_attribute {
struct attribute attr;
ssize_t (*show)(struct device *dev, struct device_attribute *attr,
char *buf);
ssize_t (*store)(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
};

struct dev_ext_attribute {
struct device_attribute attr;
void *var;
};

ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
char *buf);
ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
char *buf);
ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
char *buf);
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
# 154 "./include/linux/device.h"
int device_create_file(struct device *device,
const struct device_attribute *entry);
void device_remove_file(struct device *dev,
const struct device_attribute *attr);
bool device_remove_file_self(struct device *dev,
const struct device_attribute *attr);
int __attribute__((__warn_unused_result__)) device_create_bin_file(struct device *dev,
const struct bin_attribute *attr);
void device_remove_bin_file(struct device *dev,
const struct bin_attribute *attr);


typedef void (*dr_release_t)(struct device *dev, void *res);
typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);

void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
int nid, const char *name) __attribute__((__malloc__));

void devres_for_each_res(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data,
void (*fn)(struct device *, void *, void *),
void *data);
void devres_free(void *res);
void devres_add(struct device *dev, void *res);
void *devres_find(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data);
void *devres_get(struct device *dev, void *new_res,
dr_match_t match, void *match_data);
void *devres_remove(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data);
int devres_destroy(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data);
int devres_release(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data);


void * __attribute__((__warn_unused_result__)) devres_open_group(struct device *dev, void *id, gfp_t gfp);
void devres_close_group(struct device *dev, void *id);
void devres_remove_group(struct device *dev, void *id);
int devres_release_group(struct device *dev, void *id);


void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __attribute__((__malloc__));
void *devm_krealloc(struct device *dev, void *ptr, size_t size,
gfp_t gfp) __attribute__((__warn_unused_result__));
__attribute__((__format__(printf, 3, 0))) char *devm_kvasprintf(struct device *dev, gfp_t gfp,
const char *fmt, va_list ap) __attribute__((__malloc__));
__attribute__((__format__(printf, 3, 4))) char *devm_kasprintf(struct device *dev, gfp_t gfp,
const char *fmt, ...) __attribute__((__malloc__));
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
{
return devm_kmalloc(dev, size, gfp | (( gfp_t)0x100u));
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *devm_kmalloc_array(struct device *dev,
size_t n, size_t size, gfp_t flags)
{
size_t bytes;

if (__builtin_expect(!!(__must_check_overflow(({ typeof(n) __a = (n); typeof(size) __b = (size); typeof(&bytes) __d = (&bytes); (void) (&__a == &__b); (void) (&__a == __d); __builtin_mul_overflow(__a, __b, __d); }))), 0))
return ((void *)0);

return devm_kmalloc(dev, bytes, flags);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *devm_kcalloc(struct device *dev,
size_t n, size_t size, gfp_t flags)
{
return devm_kmalloc_array(dev, n, size, flags | (( gfp_t)0x100u));
}
void devm_kfree(struct device *dev, const void *p);
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __attribute__((__malloc__));
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp);
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp);

unsigned long devm_get_free_pages(struct device *dev,
gfp_t gfp_mask, unsigned int order);
void devm_free_pages(struct device *dev, unsigned long addr);

void *devm_ioremap_resource(struct device *dev,
const struct resource *res);
void *devm_ioremap_resource_wc(struct device *dev,
const struct resource *res);

void *devm_of_iomap(struct device *dev,
struct device_node *node, int index,
resource_size_t *size);


int devm_add_action(struct device *dev, void (*action)(void *), void *data);
void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
void devm_release_action(struct device *dev, void (*action)(void *), void *data);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int devm_add_action_or_reset(struct device *dev,
void (*action)(void *), void *data)
{
int ret;

ret = devm_add_action(dev, action, data);
if (ret)
action(data);

return ret;
}
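/*
 * The devm_* helpers above tie allocations to a struct device: every
 * resource obtained through them is released automatically when the
 * device is unbound, which removes explicit error-path cleanup. An
 * illustrative probe-time sketch (dev assumed valid; GFP_KERNEL is
 * written symbolically, since macros are already expanded in this
 * preprocessed file):
 *
 *	struct my_state *st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
 *	if (!st)
 *		return -ENOMEM;
 *	...no kfree() is needed on any later error path...
 */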
# 276 "./include/linux/device.h"
void *__devm_alloc_percpu(struct device *dev, size_t size,
size_t align);
void devm_free_percpu(struct device *dev, void *pdata);

struct device_dma_parameters {




unsigned int max_segment_size;
unsigned int min_align_mask;
unsigned long segment_boundary_mask;
};
# 299 "./include/linux/device.h"
enum device_link_state {
DL_STATE_NONE = -1,
DL_STATE_DORMANT = 0,
DL_STATE_AVAILABLE,
DL_STATE_CONSUMER_PROBE,
DL_STATE_ACTIVE,
DL_STATE_SUPPLIER_UNBIND,
};
# 338 "./include/linux/device.h"
enum dl_dev_state {
DL_DEV_NO_DRIVER = 0,
DL_DEV_PROBING,
DL_DEV_DRIVER_BOUND,
DL_DEV_UNBINDING,
};
# 354 "./include/linux/device.h"
enum device_removable {
DEVICE_REMOVABLE_NOT_SUPPORTED = 0,
DEVICE_REMOVABLE_UNKNOWN,
DEVICE_FIXED,
DEVICE_REMOVABLE,
};
# 368 "./include/linux/device.h"
struct dev_links_info {
struct list_head suppliers;
struct list_head consumers;
struct list_head defer_sync;
enum dl_dev_state status;
};






struct dev_msi_info {

struct irq_domain *domain;


struct msi_device_data *data;

};
# 486 "./include/linux/device.h"
struct device {
struct kobject kobj;
struct device *parent;

struct device_private *p;

const char *init_name;
const struct device_type *type;

struct bus_type *bus;
struct device_driver *driver;

void *platform_data;

void *driver_data;




struct mutex mutex;



struct dev_links_info links;
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
# 520 "./include/linux/device.h"
struct dev_msi_info msi;



u64 *dma_mask;
u64 coherent_dma_mask;




u64 bus_dma_limit;
const struct bus_dma_region *dma_range_map;

struct device_dma_parameters *dma_parms;

struct list_head dma_pools;


struct dma_coherent_mem *dma_mem;

struct io_tlb_mem *dma_io_tlb_mem;


struct dev_archdata archdata;

struct device_node *of_node;
struct fwnode_handle *fwnode;




dev_t devt;
u32 id;

spinlock_t devres_lock;
struct list_head devres_head;

struct class *class;
const struct attribute_group **groups;

void (*release)(struct device *dev);
struct iommu_group *iommu_group;
struct dev_iommu *iommu;

enum device_removable removable;

bool offline_disabled:1;
bool offline:1;
bool of_node_reused:1;
bool state_synced:1;
bool can_match:1;
# 585 "./include/linux/device.h"
};
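/*
 * struct device is the core object behind every bus/class/driver
 * binding: kobject-based lifetime, the bound driver and its driver_data,
 * DMA masks and parameters, power-management state, and the OF/ACPI
 * firmware handles. Its lifetime is reference counted through the
 * embedded kobject; see get_device()/put_device() declared further
 * below.
 */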
# 601 "./include/linux/device.h"
struct device_link {
struct device *supplier;
struct list_head s_node;
struct device *consumer;
struct list_head c_node;
struct device link_dev;
enum device_link_state status;
u32 flags;
refcount_t rpm_active;
struct kref kref;
struct work_struct rm_work;
bool supplier_preactivated;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct device *kobj_to_dev(struct kobject *kobj)
{
return ({ void *__mptr = (void *)(kobj); _Static_assert(__builtin_types_compatible_p(typeof(*(kobj)), typeof(((struct device *)0)->kobj)) || __builtin_types_compatible_p(typeof(*(kobj)), typeof(void)), "pointer type mismatch in container_of()"); ((struct device *)(__mptr - __builtin_offsetof(struct device, kobj))); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool device_iommu_mapped(struct device *dev)
{
return (dev->iommu_group != ((void *)0));
}



# 1 "./include/linux/pm_wakeup.h" 1
# 18 "./include/linux/pm_wakeup.h"
struct wake_irq;
# 43 "./include/linux/pm_wakeup.h"
struct wakeup_source {
const char *name;
int id;
struct list_head entry;
spinlock_t lock;
struct wake_irq *wakeirq;
struct timer_list timer;
unsigned long timer_expires;
ktime_t total_time;
ktime_t max_time;
ktime_t last_time;
ktime_t start_prevent_time;
ktime_t prevent_sleep_time;
unsigned long event_count;
unsigned long active_count;
unsigned long relax_count;
unsigned long expire_count;
unsigned long wakeup_count;
struct device *dev;
bool active:1;
bool autosleep_enabled:1;
};
# 123 "./include/linux/pm_wakeup.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void device_set_wakeup_capable(struct device *dev, bool capable)
{
dev->power.can_wakeup = capable;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool device_can_wakeup(struct device *dev)
{
return dev->power.can_wakeup;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct wakeup_source *wakeup_source_create(const char *name)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void wakeup_source_destroy(struct wakeup_source *ws) {}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void wakeup_source_add(struct wakeup_source *ws) {}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void wakeup_source_remove(struct wakeup_source *ws) {}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct wakeup_source *wakeup_source_register(struct device *dev,
const char *name)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void wakeup_source_unregister(struct wakeup_source *ws) {}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int device_wakeup_enable(struct device *dev)
{
dev->power.should_wakeup = true;
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int device_wakeup_disable(struct device *dev)
{
dev->power.should_wakeup = false;
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int device_set_wakeup_enable(struct device *dev, bool enable)
{
dev->power.should_wakeup = enable;
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int device_init_wakeup(struct device *dev, bool val)
{
device_set_wakeup_capable(dev, val);
device_set_wakeup_enable(dev, val);
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool device_may_wakeup(struct device *dev)
{
return dev->power.can_wakeup && dev->power.should_wakeup;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool device_wakeup_path(struct device *dev)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void device_set_wakeup_path(struct device *dev) {}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __pm_stay_awake(struct wakeup_source *ws) {}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pm_stay_awake(struct device *dev) {}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __pm_relax(struct wakeup_source *ws) {}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pm_relax(struct device *dev) {}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pm_wakeup_ws_event(struct wakeup_source *ws,
unsigned int msec, bool hard) {}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pm_wakeup_dev_event(struct device *dev, unsigned int msec,
bool hard) {}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
{
return pm_wakeup_ws_event(ws, msec, false);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pm_wakeup_event(struct device *dev, unsigned int msec)
{
return pm_wakeup_dev_event(dev, msec, false);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pm_wakeup_hard_event(struct device *dev)
{
return pm_wakeup_dev_event(dev, 0, true);
}
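/*
 * The wakeup-source helpers above are the stub variants: the create and
 * register calls return NULL and the event hooks are empty, which is
 * what a kernel configured without sleep-state wakeup support
 * (presumably !CONFIG_PM_SLEEP here) compiles in. Only the
 * can_wakeup/should_wakeup bookkeeping on dev->power remains live.
 */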
# 632 "./include/linux/device.h" 2

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *dev_name(const struct device *dev)
{

if (dev->init_name)
return dev->init_name;

return kobject_name(&dev->kobj);
}
# 649 "./include/linux/device.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *dev_bus_name(const struct device *dev)
{
return dev->bus ? dev->bus->name : (dev->class ? dev->class->name : "");
}

__attribute__((__format__(printf, 2, 3))) int dev_set_name(struct device *dev, const char *name, ...);
# 666 "./include/linux/device.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dev_to_node(struct device *dev)
{
return (-1);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_dev_node(struct device *dev, int node)
{
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct irq_domain *dev_get_msi_domain(const struct device *dev)
{

return dev->msi.domain;



}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
{

dev->msi.domain = d;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *dev_get_drvdata(const struct device *dev)
{
return dev->driver_data;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_set_drvdata(struct device *dev, void *data)
{
dev->driver_data = data;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct pm_subsys_data *dev_to_psd(struct device *dev)
{
return dev ? dev->power.subsys_data : ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int dev_get_uevent_suppress(const struct device *dev)
{
return dev->kobj.uevent_suppress;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_set_uevent_suppress(struct device *dev, int val)
{
dev->kobj.uevent_suppress = val;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int device_is_registered(struct device *dev)
{
return dev->kobj.state_in_sysfs;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void device_enable_async_suspend(struct device *dev)
{
if (!dev->power.is_prepared)
dev->power.async_suspend = true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void device_disable_async_suspend(struct device *dev)
{
if (!dev->power.is_prepared)
dev->power.async_suspend = false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool device_async_suspend_enabled(struct device *dev)
{
return !!dev->power.async_suspend;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool device_pm_not_required(struct device *dev)
{
return dev->power.no_pm;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void device_set_pm_not_required(struct device *dev)
{
dev->power.no_pm = true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_pm_syscore_device(struct device *dev, bool val)
{



}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_pm_set_driver_flags(struct device *dev, u32 flags)
{
dev->power.driver_flags = flags;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dev_pm_test_driver_flags(struct device *dev, u32 flags)
{
return !!(dev->power.driver_flags & flags);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void device_lock(struct device *dev)
{
mutex_lock_nested(&dev->mutex, 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int device_lock_interruptible(struct device *dev)
{
return mutex_lock_interruptible_nested(&dev->mutex, 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int device_trylock(struct device *dev)
{
return mutex_trylock(&dev->mutex);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void device_unlock(struct device *dev)
{
mutex_unlock(&dev->mutex);
}
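/*
 * device_lock()/device_unlock() serialize against driver probe and
 * remove on dev->mutex; an illustrative pairing (dev assumed valid):
 *
 *	device_lock(dev);
 *	...inspect or modify state that races with probe/remove...
 *	device_unlock(dev);
 */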

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void device_lock_assert(struct device *dev)
{
do { ({ int __ret_warn_on = !!(debug_locks && !(lock_is_held(&(&dev->mutex)->dep_map) != 0)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/device.h"), "i" (787), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct device_node *dev_of_node(struct device *dev)
{
if (!1 || !dev)
return ((void *)0);
return dev->of_node;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dev_has_sync_state(struct device *dev)
{
if (!dev)
return false;
if (dev->driver && dev->driver->sync_state)
return true;
if (dev->bus && dev->bus->sync_state)
return true;
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_set_removable(struct device *dev,
enum device_removable removable)
{
dev->removable = removable;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dev_is_removable(struct device *dev)
{
return dev->removable == DEVICE_REMOVABLE;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dev_removable_is_valid(struct device *dev)
{
return dev->removable != DEVICE_REMOVABLE_NOT_SUPPORTED;
}




int __attribute__((__warn_unused_result__)) device_register(struct device *dev);
void device_unregister(struct device *dev);
void device_initialize(struct device *dev);
int __attribute__((__warn_unused_result__)) device_add(struct device *dev);
void device_del(struct device *dev);
int device_for_each_child(struct device *dev, void *data,
int (*fn)(struct device *dev, void *data));
int device_for_each_child_reverse(struct device *dev, void *data,
int (*fn)(struct device *dev, void *data));
struct device *device_find_child(struct device *dev, void *data,
int (*match)(struct device *dev, void *data));
struct device *device_find_child_by_name(struct device *parent,
const char *name);
int device_rename(struct device *dev, const char *new_name);
int device_move(struct device *dev, struct device *new_parent,
enum dpm_order dpm_order);
int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
kgid_t *gid, const char **tmp);
int device_is_dependent(struct device *dev, void *target);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool device_supports_offline(struct device *dev)
{
return dev->bus && dev->bus->offline && dev->bus->online;
}

void lock_device_hotplug(void);
void unlock_device_hotplug(void);
int lock_device_hotplug_sysfs(void);
int device_offline(struct device *dev);
int device_online(struct device *dev);
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);
void device_set_node(struct device *dev, struct fwnode_handle *fwnode);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dev_num_vf(struct device *dev)
{
if (dev->bus && dev->bus->num_vf)
return dev->bus->num_vf(dev);
return 0;
}




struct device *__root_device_register(const char *name, struct module *owner);





void root_device_unregister(struct device *root);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *dev_get_platdata(const struct device *dev)
{
return dev->platform_data;
}

int __attribute__((__warn_unused_result__)) device_driver_attach(struct device_driver *drv,
struct device *dev);
int __attribute__((__warn_unused_result__)) device_bind_driver(struct device *dev);
void device_release_driver(struct device *dev);
int __attribute__((__warn_unused_result__)) device_attach(struct device *dev);
int __attribute__((__warn_unused_result__)) driver_attach(struct device_driver *drv);
void device_initial_probe(struct device *dev);
int __attribute__((__warn_unused_result__)) device_reprobe(struct device *dev);

bool device_is_bound(struct device *dev);




__attribute__((__format__(printf, 5, 6))) struct device *
device_create(struct class *cls, struct device *parent, dev_t devt,
void *drvdata, const char *fmt, ...);
__attribute__((__format__(printf, 6, 7))) struct device *
device_create_with_groups(struct class *cls, struct device *parent, dev_t devt,
void *drvdata, const struct attribute_group **groups,
const char *fmt, ...);
void device_destroy(struct class *cls, dev_t devt);

int __attribute__((__warn_unused_result__)) device_add_groups(struct device *dev,
const struct attribute_group **groups);
void device_remove_groups(struct device *dev,
const struct attribute_group **groups);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) device_add_group(struct device *dev,
const struct attribute_group *grp)
{
const struct attribute_group *groups[] = { grp, ((void *)0) };

return device_add_groups(dev, groups);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void device_remove_group(struct device *dev,
const struct attribute_group *grp)
{
const struct attribute_group *groups[] = { grp, ((void *)0) };

return device_remove_groups(dev, groups);
}

int __attribute__((__warn_unused_result__)) devm_device_add_groups(struct device *dev,
const struct attribute_group **groups);
void devm_device_remove_groups(struct device *dev,
const struct attribute_group **groups);
int __attribute__((__warn_unused_result__)) devm_device_add_group(struct device *dev,
const struct attribute_group *grp);
void devm_device_remove_group(struct device *dev,
const struct attribute_group *grp);

extern int (*platform_notify)(struct device *dev);

extern int (*platform_notify_remove)(struct device *dev);






struct device *get_device(struct device *dev);
void put_device(struct device *dev);
bool kill_device(struct device *dev);


int devtmpfs_mount(void);





void device_shutdown(void);


const char *dev_driver_string(const struct device *dev);


struct device_link *device_link_add(struct device *consumer,
struct device *supplier, u32 flags);
void device_link_del(struct device_link *link);
void device_link_remove(void *consumer, struct device *supplier);
void device_links_supplier_sync_state_pause(void);
void device_links_supplier_sync_state_resume(void);
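/*
 * Device links record a consumer/supplier dependency so probe ordering
 * and suspend/resume honor it. An illustrative sketch (both pointers
 * assumed valid; the flag is written symbolically, as macros are
 * expanded in this file):
 *
 *	struct device_link *link = device_link_add(consumer, supplier,
 *						   DL_FLAG_AUTOREMOVE_CONSUMER);
 *	if (!link)
 *		...the dependency could not be recorded...
 */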

extern __attribute__((__format__(printf, 3, 4)))
int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
# 8 "./include/linux/dma-mapping.h" 2

# 1 "./include/linux/dma-direction.h" 1




enum dma_data_direction {
DMA_BIDIRECTIONAL = 0,
DMA_TO_DEVICE = 1,
DMA_FROM_DEVICE = 2,
DMA_NONE = 3,
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int valid_dma_direction(enum dma_data_direction dir)
{
return dir == DMA_BIDIRECTIONAL || dir == DMA_TO_DEVICE ||
dir == DMA_FROM_DEVICE;
}
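/*
 * enum dma_data_direction states who reads and who writes the mapped
 * buffer: DMA_TO_DEVICE for CPU-written data the device will read,
 * DMA_FROM_DEVICE for device-written data the CPU will read, and
 * DMA_BIDIRECTIONAL for both. DMA_NONE is a debugging sentinel, which
 * is why valid_dma_direction() rejects it.
 */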
# 10 "./include/linux/dma-mapping.h" 2
# 1 "./include/linux/scatterlist.h" 1
# 11 "./include/linux/scatterlist.h"
struct scatterlist {
unsigned long page_link;
unsigned int offset;
unsigned int length;
dma_addr_t dma_address;



};
# 36 "./include/linux/scatterlist.h"
struct sg_table {
struct scatterlist *sgl;
unsigned int nents;
unsigned int orig_nents;
};

struct sg_append_table {
struct sg_table sgt;
struct scatterlist *prv;
unsigned int total_nents;
};
# 74 "./include/linux/scatterlist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int __sg_flags(struct scatterlist *sg)
{
return sg->page_link & (0x01UL | 0x02UL);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct scatterlist *sg_chain_ptr(struct scatterlist *sg)
{
return (struct scatterlist *)(sg->page_link & ~(0x01UL | 0x02UL));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sg_is_chain(struct scatterlist *sg)
{
return __sg_flags(sg) & 0x01UL;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sg_is_last(struct scatterlist *sg)
{
return __sg_flags(sg) & 0x02UL;
}
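/*
 * A scatterlist entry overloads the low two bits of page_link, which is
 * what the 0x01UL/0x02UL masking above implements: bit 0 marks a chain
 * entry whose remaining bits point at the next scatterlist array, and
 * bit 1 marks the final entry of the list. struct page pointers are at
 * least 4-byte aligned, so both bits are otherwise always clear.
 */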
# 104 "./include/linux/scatterlist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sg_assign_page(struct scatterlist *sg, struct page *page)
{
unsigned long page_link = sg->page_link & (0x01UL | 0x02UL);





do { if (__builtin_expect(!!((unsigned long)page & (0x01UL | 0x02UL)), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/scatterlist.h"), "i" (112), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);



sg->page_link = page_link | (unsigned long) page;
}
# 133 "./include/linux/scatterlist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sg_set_page(struct scatterlist *sg, struct page *page,
unsigned int len, unsigned int offset)
{
sg_assign_page(sg, page);
sg->offset = offset;
sg->length = len;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *sg_page(struct scatterlist *sg)
{



return (struct page *)((sg)->page_link & ~(0x01UL | 0x02UL));
}
# 156 "./include/linux/scatterlist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sg_set_buf(struct scatterlist *sg, const void *buf,
unsigned int buflen)
{



sg_set_page(sg, ((((struct page *)((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) ))))))) + (((((({ unsigned long _x = (unsigned long)(buf); ((_x) >= kernel_map.page_offset && (!1 || (_x) < kernel_map.page_offset + (((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2))) ? ((unsigned long)(_x) - kernel_map.va_pa_offset) : ({ unsigned long _y = _x; (0 && _y < kernel_map.virt_addr + 0) ? ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - 0); }); })) >> (12))))))), buflen, ((unsigned long)(buf) & ~(~(((1UL) << (12)) - 1))));
}
# 185 "./include/linux/scatterlist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __sg_chain(struct scatterlist *chain_sg,
struct scatterlist *sgl)
{



chain_sg->offset = 0;
chain_sg->length = 0;





chain_sg->page_link = ((unsigned long) sgl | 0x01UL) & ~0x02UL;
}
# 211 "./include/linux/scatterlist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
struct scatterlist *sgl)
{
__sg_chain(&prv[prv_nents - 1], sgl);
}
# 226 "./include/linux/scatterlist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sg_mark_end(struct scatterlist *sg)
{



sg->page_link |= 0x02UL;
sg->page_link &= ~0x01UL;
}
# 243 "./include/linux/scatterlist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sg_unmark_end(struct scatterlist *sg)
{
sg->page_link &= ~0x02UL;
}
# 258 "./include/linux/scatterlist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) dma_addr_t sg_phys(struct scatterlist *sg)
{
return ((((phys_addr_t)((unsigned long)((sg_page(sg)) - ((struct page *)((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) ))))))))) << (12)))) + sg->offset;
}
# 273 "./include/linux/scatterlist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *sg_virt(struct scatterlist *sg)
{
return lowmem_page_address(sg_page(sg)) + sg->offset;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sg_init_marker(struct scatterlist *sgl,
unsigned int nents)
{
sg_mark_end(&sgl[nents - 1]);
}

int sg_nents(struct scatterlist *sg);
int sg_nents_for_len(struct scatterlist *sg, u64 len);
struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
void sg_init_one(struct scatterlist *, const void *, unsigned int);
int sg_split(struct scatterlist *in, const int in_mapped_nents,
const off_t skip, const int nb_splits,
const size_t *split_sizes,
struct scatterlist **out, int *out_mapped_nents,
gfp_t gfp_mask);

typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);

void __sg_free_table(struct sg_table *, unsigned int, unsigned int,
sg_free_fn *, unsigned int);
void sg_free_table(struct sg_table *);
void sg_free_append_table(struct sg_append_table *sgt);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
int sg_alloc_append_table_from_pages(struct sg_append_table *sgt,
struct page **pages, unsigned int n_pages,
unsigned int offset, unsigned long size,
unsigned int max_segment,
unsigned int left_pages, gfp_t gfp_mask);
int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
unsigned int n_pages, unsigned int offset,
unsigned long size,
unsigned int max_segment, gfp_t gfp_mask);
# 342 "./include/linux/scatterlist.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sg_alloc_table_from_pages(struct sg_table *sgt,
struct page **pages,
unsigned int n_pages,
unsigned int offset,
unsigned long size, gfp_t gfp_mask)
{
return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset,
size, (~0U), gfp_mask);
}


struct scatterlist *sgl_alloc_order(unsigned long long length,
unsigned int order, bool chainable,
gfp_t gfp, unsigned int *nent_p);
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
unsigned int *nent_p);
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order);
void sgl_free_order(struct scatterlist *sgl, int order);
void sgl_free(struct scatterlist *sgl);


size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
size_t buflen, off_t skip, bool to_buffer);

size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
const void *buf, size_t buflen);
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen);

size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
const void *buf, size_t buflen, off_t skip);
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, off_t skip);
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
size_t buflen, off_t skip);
# 404 "./include/linux/scatterlist.h"
void sg_free_table_chained(struct sg_table *table,
unsigned nents_first_chunk);
int sg_alloc_table_chained(struct sg_table *table, int nents,
struct scatterlist *first_chunk,
unsigned nents_first_chunk);
# 421 "./include/linux/scatterlist.h"
struct sg_page_iter {
struct scatterlist *sg;
unsigned int sg_pgoffset;


unsigned int __nents;
int __pg_advance;

};
# 438 "./include/linux/scatterlist.h"
struct sg_dma_page_iter {
struct sg_page_iter base;
};

bool __sg_page_iter_next(struct sg_page_iter *piter);
bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter);
void __sg_page_iter_start(struct sg_page_iter *piter,
struct scatterlist *sglist, unsigned int nents,
unsigned long pgoffset);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *sg_page_iter_page(struct sg_page_iter *piter)
{
return ((sg_page(piter->sg)) + (piter->sg_pgoffset));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) dma_addr_t
sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
{
return ((dma_iter->base.sg)->dma_address) +
(dma_iter->base.sg_pgoffset << (12));
}
# 546 "./include/linux/scatterlist.h"
struct sg_mapping_iter {

struct page *page;
void *addr;
size_t length;
size_t consumed;
struct sg_page_iter piter;


unsigned int __offset;
unsigned int __remaining;
unsigned int __flags;
};

void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
unsigned int nents, unsigned int flags);
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset);
bool sg_miter_next(struct sg_mapping_iter *miter);
void sg_miter_stop(struct sg_mapping_iter *miter);
# 11 "./include/linux/dma-mapping.h" 2

# 1 "./include/linux/mem_encrypt.h" 1
# 13 "./include/linux/dma-mapping.h" 2
# 83 "./include/linux/dma-mapping.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void debug_dma_mapping_error(struct device *dev,
dma_addr_t dma_addr)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void debug_dma_map_single(struct device *dev, const void *addr,
unsigned long len)
{
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
debug_dma_mapping_error(dev, dma_addr);

if (__builtin_expect(!!(dma_addr == (~(dma_addr_t)0)), 0))
return -12;
return 0;
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
size_t offset, size_t size, enum dma_data_direction dir,
unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir, unsigned long attrs);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir);
void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
size_t size, struct sg_table *sgt);
# 302 "./include/linux/dma-mapping.h"
struct page *dma_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
size_t size, struct page *page);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
return page ? lowmem_page_address(page) : ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dma_free_noncoherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
dma_free_pages(dev, size, ((((struct page *)((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) ))))))) + (((((({ unsigned long _x = (unsigned long)(vaddr); ((_x) >= kernel_map.page_offset && (!1 || (_x) < kernel_map.page_offset + (((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2))) ? ((unsigned long)(_x) - kernel_map.va_pa_offset) : ({ unsigned long _y = _x; (0 && _y < kernel_map.virt_addr + 0) ? ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - 0); }); })) >> (12))))))), dma_handle, dir);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{

if (({ static bool __attribute__((__section__(".data.once"))) __already_done; bool __ret_do_once = !!(is_vmalloc_addr(ptr)); if (__builtin_expect(!!(__ret_do_once && !__already_done), 0)) { __already_done = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("%s %s: " "rejecting DMA map of vmalloc memory\n", dev_driver_string(dev), dev_name(dev)); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/dma-mapping.h"), "i" (327), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_do_once), 0); }))

return (~(dma_addr_t)0);
debug_dma_map_single(dev, ptr, size);
return dma_map_page_attrs(dev, ((((struct page *)((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) ))))))) + (((((({ unsigned long _x = (unsigned long)(ptr); ((_x) >= kernel_map.page_offset && (!1 || (_x) < kernel_map.page_offset + (((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2))) ? ((unsigned long)(_x) - kernel_map.va_pa_offset) : ({ unsigned long _y = _x; (0 && _y < kernel_map.virt_addr + 0) ? ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - 0); }); })) >> (12))))))), ((unsigned long)(ptr) & ~(~(((1UL) << (12)) - 1))),
size, dir, attrs);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t addr, unsigned long offset, size_t size,
enum dma_data_direction dir)
{
return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dma_sync_single_range_for_device(struct device *dev,
dma_addr_t addr, unsigned long offset, size_t size,
enum dma_data_direction dir)
{
return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
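/*
 * Streaming-DMA sketch over the declarations above (illustrative only;
 * dev, buf and len are assumed, and the error code is symbolic):
 *
 *	dma_addr_t handle = dma_map_single_attrs(dev, buf, len,
 *						 DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	...start the device transfer...
 *	dma_unmap_single_attrs(dev, handle, len, DMA_TO_DEVICE, 0);
 */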
# 365 "./include/linux/dma-mapping.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
enum dma_data_direction dir, unsigned long attrs)
{
dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}
# 383 "./include/linux/dma-mapping.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dma_sync_sgtable_for_cpu(struct device *dev,
struct sg_table *sgt, enum dma_data_direction dir)
{
dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}
# 400 "./include/linux/dma-mapping.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dma_sync_sgtable_for_device(struct device *dev,
struct sg_table *sgt, enum dma_data_direction dir)
{
dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
# 415 "./include/linux/dma-mapping.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
return dma_alloc_attrs(dev, size, dma_handle, gfp,
(gfp & (( gfp_t)0x2000u)) ? (1UL << 8) : 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 dma_get_mask(struct device *dev)
{
if (dev->dma_mask && *dev->dma_mask)
return *dev->dma_mask;
return (((32) == 64) ? ~0ULL : ((1ULL<<(32))-1));
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
int rc = dma_set_mask(dev, mask);
if (rc == 0)
dma_set_coherent_mask(dev, mask);
return rc;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
dev->dma_mask = &dev->coherent_dma_mask;
return dma_set_mask_and_coherent(dev, mask);
}
# 468 "./include/linux/dma-mapping.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dma_addressing_limited(struct device *dev)
{
return ({ typeof(dma_get_mask(dev)) __x = (dma_get_mask(dev)); typeof(dev->bus_dma_limit) __y = (dev->bus_dma_limit); __x == 0 ? __y : ((__y == 0) ? __x : __builtin_choose_expr(((!!(sizeof((typeof(__x) *)1 == (typeof(__y) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(__x) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(__y) * 0l)) : (int *)8))))), ((__x) < (__y) ? (__x) : (__y)), ({ typeof(__x) __UNIQUE_ID___x258 = (__x); typeof(__y) __UNIQUE_ID___y259 = (__y); ((__UNIQUE_ID___x258) < (__UNIQUE_ID___y259) ? (__UNIQUE_ID___x258) : (__UNIQUE_ID___y259)); }))); }) <
dma_get_required_mask(dev);
}
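/*
 * The ({ ... }) statement expression above is min_not_zero(dma_get_mask(dev),
 * dev->bus_dma_limit): zero means "no limit" for either operand, so the
 * helper reports true only when the effective addressing mask is smaller
 * than dma_get_required_mask(dev).
 */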

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int dma_get_max_seg_size(struct device *dev)
{
if (dev->dma_parms && dev->dma_parms->max_segment_size)
return dev->dma_parms->max_segment_size;
return 0x00010000;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
if (dev->dma_parms) {
dev->dma_parms->max_segment_size = size;
return 0;
}
return -5;
}
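/*
 * The bare -5 here (and in dma_set_seg_boundary below) is the expansion of
 * -EIO; likewise the 0x00010000 above is the 64 KiB default segment size
 * used when a device supplies no dma_parms.
 */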

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long dma_get_seg_boundary(struct device *dev)
{
if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
return dev->dma_parms->segment_boundary_mask;
return (~0UL);
}
# 508 "./include/linux/dma-mapping.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
unsigned int page_shift)
{
if (!dev)
return (((u32)~0U) >> page_shift) + 1;
return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
if (dev->dma_parms) {
dev->dma_parms->segment_boundary_mask = mask;
return 0;
}
return -5;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int dma_get_min_align_mask(struct device *dev)
{
if (dev->dma_parms)
return dev->dma_parms->min_align_mask;
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dma_set_min_align_mask(struct device *dev,
unsigned int min_align_mask)
{
if (({ int __ret_warn_on = !!(!dev->dma_parms); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/dma-mapping.h"), "i" (535), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }))
return -5;
dev->dma_parms->min_align_mask = min_align_mask;
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dma_get_cache_alignment(void)
{



return 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *dmam_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
return dmam_alloc_attrs(dev, size, dma_handle, gfp,
(gfp & (( gfp_t)0x2000u)) ? (1UL << 8) : 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *dma_alloc_wc(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t gfp)
{
unsigned long attrs = (1UL << 2);

if (gfp & (( gfp_t)0x2000u))
attrs |= (1UL << 8);

return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dma_free_wc(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr)
{
return dma_free_attrs(dev, size, cpu_addr, dma_addr,
(1UL << 2));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dma_mmap_wc(struct device *dev,
struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr,
size_t size)
{
return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
(1UL << 2));
}
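/*
 * In the *_wc helpers the magic numbers decode as: (1UL << 2) is
 * DMA_ATTR_WRITE_COMBINE, ((gfp_t)0x2000u) is __GFP_NOWARN, and (1UL << 8)
 * is DMA_ATTR_NO_WARN, matching dma_alloc_coherent() and
 * dmam_alloc_coherent() earlier in this header.
 */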
# 32 "./include/linux/skbuff.h" 2
# 1 "./include/linux/netdev_features.h" 1
# 12 "./include/linux/netdev_features.h"
typedef u64 netdev_features_t;

enum {
NETIF_F_SG_BIT,
NETIF_F_IP_CSUM_BIT,
__UNUSED_NETIF_F_1,
NETIF_F_HW_CSUM_BIT,
NETIF_F_IPV6_CSUM_BIT,
NETIF_F_HIGHDMA_BIT,
NETIF_F_FRAGLIST_BIT,
NETIF_F_HW_VLAN_CTAG_TX_BIT,
NETIF_F_HW_VLAN_CTAG_RX_BIT,
NETIF_F_HW_VLAN_CTAG_FILTER_BIT,
NETIF_F_VLAN_CHALLENGED_BIT,
NETIF_F_GSO_BIT,
NETIF_F_LLTX_BIT,

NETIF_F_NETNS_LOCAL_BIT,
NETIF_F_GRO_BIT,
NETIF_F_LRO_BIT,

NETIF_F_GSO_SHIFT,
NETIF_F_TSO_BIT
= NETIF_F_GSO_SHIFT,
NETIF_F_GSO_ROBUST_BIT,
NETIF_F_TSO_ECN_BIT,
NETIF_F_TSO_MANGLEID_BIT,
NETIF_F_TSO6_BIT,
NETIF_F_FSO_BIT,
NETIF_F_GSO_GRE_BIT,
NETIF_F_GSO_GRE_CSUM_BIT,
NETIF_F_GSO_IPXIP4_BIT,
NETIF_F_GSO_IPXIP6_BIT,
NETIF_F_GSO_UDP_TUNNEL_BIT,
NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,
NETIF_F_GSO_PARTIAL_BIT,



NETIF_F_GSO_TUNNEL_REMCSUM_BIT,
NETIF_F_GSO_SCTP_BIT,
NETIF_F_GSO_ESP_BIT,
NETIF_F_GSO_UDP_BIT,
NETIF_F_GSO_UDP_L4_BIT,
NETIF_F_GSO_FRAGLIST_BIT,
NETIF_F_GSO_LAST =
NETIF_F_GSO_FRAGLIST_BIT,

NETIF_F_FCOE_CRC_BIT,
NETIF_F_SCTP_CRC_BIT,
NETIF_F_FCOE_MTU_BIT,
NETIF_F_NTUPLE_BIT,
NETIF_F_RXHASH_BIT,
NETIF_F_RXCSUM_BIT,
NETIF_F_NOCACHE_COPY_BIT,
NETIF_F_LOOPBACK_BIT,
NETIF_F_RXFCS_BIT,
NETIF_F_RXALL_BIT,
NETIF_F_HW_VLAN_STAG_TX_BIT,
NETIF_F_HW_VLAN_STAG_RX_BIT,
NETIF_F_HW_VLAN_STAG_FILTER_BIT,
NETIF_F_HW_L2FW_DOFFLOAD_BIT,

NETIF_F_HW_TC_BIT,
NETIF_F_HW_ESP_BIT,
NETIF_F_HW_ESP_TX_CSUM_BIT,
NETIF_F_RX_UDP_TUNNEL_PORT_BIT,
NETIF_F_HW_TLS_TX_BIT,
NETIF_F_HW_TLS_RX_BIT,

NETIF_F_GRO_HW_BIT,
NETIF_F_HW_TLS_RECORD_BIT,
NETIF_F_GRO_FRAGLIST_BIT,

NETIF_F_HW_MACSEC_BIT,
NETIF_F_GRO_UDP_FWD_BIT,

NETIF_F_HW_HSR_TAG_INS_BIT,
NETIF_F_HW_HSR_TAG_RM_BIT,
NETIF_F_HW_HSR_FWD_BIT,
NETIF_F_HW_HSR_DUP_BIT,
# 101 "./include/linux/netdev_features.h"
NETDEV_FEATURE_COUNT
};
# 174 "./include/linux/netdev_features.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int find_next_netdev_feature(u64 feature, unsigned long start)
{



feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));

return fls64(feature) - 1;
}
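/*
 * find_next_netdev_feature() walks feature bits from high to low.  The mask
 * ~0ULL >> (-start & 63) clears every bit at position >= start, and
 * fls64() - 1 then yields the highest remaining set bit, or -1 when none is
 * left.  Illustrative values: with feature = 0x29 and start = 5 the mask
 * keeps bits 0..4, leaving 0x09, so the result is bit 3.
 */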
# 33 "./include/linux/skbuff.h" 2

# 1 "./include/linux/sched/clock.h" 1
# 15 "./include/linux/sched/clock.h"
extern unsigned long long __attribute__((patchable_function_entry(0, 0))) sched_clock(void);




extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);


extern void sched_clock_init(void);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sched_clock_tick(void)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_sched_clock_stable(void)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sched_clock_idle_sleep_event(void)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sched_clock_idle_wakeup_event(void)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 cpu_clock(int cpu)
{
return sched_clock();
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 local_clock(void)
{
return sched_clock();
}
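/*
 * These empty bodies are the variant of the sched_clock glue built when
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK is not set: with a clock the scheduler
 * can trust, cpu_clock() and local_clock() simply defer to sched_clock().
 */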
# 94 "./include/linux/sched/clock.h"
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
# 35 "./include/linux/skbuff.h" 2

# 1 "./include/linux/splice.h" 1
# 12 "./include/linux/splice.h"
# 1 "./include/linux/pipe_fs_i.h" 1
# 26 "./include/linux/pipe_fs_i.h"
struct pipe_buffer {
struct page *page;
unsigned int offset, len;
const struct pipe_buf_operations *ops;
unsigned int flags;
unsigned long private;
};
# 58 "./include/linux/pipe_fs_i.h"
struct pipe_inode_info {
struct mutex mutex;
wait_queue_head_t rd_wait, wr_wait;
unsigned int head;
unsigned int tail;
unsigned int max_usage;
unsigned int ring_size;



unsigned int nr_accounted;
unsigned int readers;
unsigned int writers;
unsigned int files;
unsigned int r_counter;
unsigned int w_counter;
unsigned int poll_usage;
struct page *tmp_page;
struct fasync_struct *fasync_readers;
struct fasync_struct *fasync_writers;
struct pipe_buffer *bufs;
struct user_struct *user;



};
# 95 "./include/linux/pipe_fs_i.h"
struct pipe_buf_operations {







int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);





void (*release)(struct pipe_inode_info *, struct pipe_buffer *);
# 119 "./include/linux/pipe_fs_i.h"
bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *);




bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
};






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool pipe_empty(unsigned int head, unsigned int tail)
{
return head == tail;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int pipe_occupancy(unsigned int head, unsigned int tail)
{
return head - tail;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool pipe_full(unsigned int head, unsigned int tail,
unsigned int limit)
{
return pipe_occupancy(head, tail) >= limit;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int pipe_space_for_user(unsigned int head, unsigned int tail,
struct pipe_inode_info *pipe)
{
unsigned int p_occupancy, p_space;

p_occupancy = pipe_occupancy(head, tail);
if (p_occupancy >= pipe->max_usage)
return 0;
p_space = pipe->ring_size - p_occupancy;
if (p_space > pipe->max_usage)
p_space = pipe->max_usage;
return p_space;
}
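/*
 * pipe head/tail are free-running unsigned counters, so pipe_occupancy()'s
 * plain subtraction stays correct across wraparound.  A minimal sketch of
 * how a slot is derived from such a counter elsewhere in the pipe code,
 * assuming ring_size is a power of two (round_pipe_size(), declared below,
 * guarantees this):
 *
 *	unsigned int slot = head & (pipe->ring_size - 1);
 *	struct pipe_buffer *buf = &pipe->bufs[slot];
 *
 * pipe_space_for_user() additionally caps the writable slots at max_usage,
 * which may be smaller than ring_size.
 */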
# 186 "./include/linux/pipe_fs_i.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__)) bool pipe_buf_get(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
return buf->ops->get(pipe, buf);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
const struct pipe_buf_operations *ops = buf->ops;

buf->ops = ((void *)0);
ops->release(pipe, buf);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pipe_buf_confirm(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
if (!buf->ops->confirm)
return 0;
return buf->ops->confirm(pipe, buf);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool pipe_buf_try_steal(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
if (!buf->ops->try_steal)
return false;
return buf->ops->try_steal(pipe, buf);
}






void pipe_lock(struct pipe_inode_info *);
void pipe_unlock(struct pipe_inode_info *);
void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);


void pipe_wait_readable(struct pipe_inode_info *);
void pipe_wait_writable(struct pipe_inode_info *);

struct pipe_inode_info *alloc_pipe_info(void);
void free_pipe_info(struct pipe_inode_info *);


bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
bool generic_pipe_buf_try_steal(struct pipe_inode_info *, struct pipe_buffer *);
void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);

extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
# 267 "./include/linux/pipe_fs_i.h"
long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice);

int create_pipe_files(struct file **, int);
unsigned int round_pipe_size(unsigned long size);
# 13 "./include/linux/splice.h" 2
# 29 "./include/linux/splice.h"
struct splice_desc {
size_t total_len;
unsigned int len;
unsigned int flags;



union {
void *userptr;
struct file *file;
void *data;
} u;
loff_t pos;
loff_t *opos;
size_t num_spliced;
bool need_wakeup;
};

struct partial_page {
unsigned int offset;
unsigned int len;
unsigned long private;
};




struct splice_pipe_desc {
struct page **pages;
struct partial_page *partial;
int nr_pages;
unsigned int nr_pages_max;
const struct pipe_buf_operations *ops;
void (*spd_release)(struct splice_pipe_desc *, unsigned int);
};

typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
struct splice_desc *);
typedef int (splice_direct_actor)(struct pipe_inode_info *,
struct splice_desc *);

extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
loff_t *, size_t, unsigned int,
splice_actor *);
extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
struct splice_desc *, splice_actor *);
extern ssize_t splice_to_pipe(struct pipe_inode_info *,
struct splice_pipe_desc *);
extern ssize_t add_to_pipe(struct pipe_inode_info *,
struct pipe_buffer *);
extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
splice_direct_actor *);
extern long do_splice(struct file *in, loff_t *off_in,
struct file *out, loff_t *off_out,
size_t len, unsigned int flags);

extern long do_tee(struct file *in, struct file *out, size_t len,
unsigned int flags);




extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *);
extern void splice_shrink_spd(struct splice_pipe_desc *);

extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
extern const struct pipe_buf_operations default_pipe_buf_ops;
# 37 "./include/linux/skbuff.h" 2

# 1 "./include/uapi/linux/if_packet.h" 1







struct sockaddr_pkt {
unsigned short spkt_family;
unsigned char spkt_device[14];
__be16 spkt_protocol;
};

struct sockaddr_ll {
unsigned short sll_family;
__be16 sll_protocol;
int sll_ifindex;
unsigned short sll_hatype;
unsigned char sll_pkttype;
unsigned char sll_halen;
unsigned char sll_addr[8];
};
# 75 "./include/uapi/linux/if_packet.h"
struct tpacket_stats {
unsigned int tp_packets;
unsigned int tp_drops;
};

struct tpacket_stats_v3 {
unsigned int tp_packets;
unsigned int tp_drops;
unsigned int tp_freeze_q_cnt;
};

struct tpacket_rollover_stats {
__u64 __attribute__((aligned(8))) tp_all;
__u64 __attribute__((aligned(8))) tp_huge;
__u64 __attribute__((aligned(8))) tp_failed;
};

union tpacket_stats_u {
struct tpacket_stats stats1;
struct tpacket_stats_v3 stats3;
};

struct tpacket_auxdata {
__u32 tp_status;
__u32 tp_len;
__u32 tp_snaplen;
__u16 tp_mac;
__u16 tp_net;
__u16 tp_vlan_tci;
__u16 tp_vlan_tpid;
};
# 132 "./include/uapi/linux/if_packet.h"
struct tpacket_hdr {
unsigned long tp_status;
unsigned int tp_len;
unsigned int tp_snaplen;
unsigned short tp_mac;
unsigned short tp_net;
unsigned int tp_sec;
unsigned int tp_usec;
};





struct tpacket2_hdr {
__u32 tp_status;
__u32 tp_len;
__u32 tp_snaplen;
__u16 tp_mac;
__u16 tp_net;
__u32 tp_sec;
__u32 tp_nsec;
__u16 tp_vlan_tci;
__u16 tp_vlan_tpid;
__u8 tp_padding[4];
};

struct tpacket_hdr_variant1 {
__u32 tp_rxhash;
__u32 tp_vlan_tci;
__u16 tp_vlan_tpid;
__u16 tp_padding;
};

struct tpacket3_hdr {
__u32 tp_next_offset;
__u32 tp_sec;
__u32 tp_nsec;
__u32 tp_snaplen;
__u32 tp_len;
__u32 tp_status;
__u16 tp_mac;
__u16 tp_net;

union {
struct tpacket_hdr_variant1 hv1;
};
__u8 tp_padding[8];
};

struct tpacket_bd_ts {
unsigned int ts_sec;
union {
unsigned int ts_usec;
unsigned int ts_nsec;
};
};

struct tpacket_hdr_v1 {
__u32 block_status;
__u32 num_pkts;
__u32 offset_to_first_pkt;




__u32 blk_len;
# 209 "./include/uapi/linux/if_packet.h"
__u64 __attribute__((aligned(8))) seq_num;
# 236 "./include/uapi/linux/if_packet.h"
struct tpacket_bd_ts ts_first_pkt, ts_last_pkt;
};

union tpacket_bd_header_u {
struct tpacket_hdr_v1 bh1;
};

struct tpacket_block_desc {
__u32 version;
__u32 offset_to_priv;
union tpacket_bd_header_u hdr;
};




enum tpacket_versions {
TPACKET_V1,
TPACKET_V2,
TPACKET_V3
};
# 271 "./include/uapi/linux/if_packet.h"
struct tpacket_req {
unsigned int tp_block_size;
unsigned int tp_block_nr;
unsigned int tp_frame_size;
unsigned int tp_frame_nr;
};

struct tpacket_req3 {
unsigned int tp_block_size;
unsigned int tp_block_nr;
unsigned int tp_frame_size;
unsigned int tp_frame_nr;
unsigned int tp_retire_blk_tov;
unsigned int tp_sizeof_priv;
unsigned int tp_feature_req_word;
};

union tpacket_req_u {
struct tpacket_req req;
struct tpacket_req3 req3;
};

struct packet_mreq {
int mr_ifindex;
unsigned short mr_type;
unsigned short mr_alen;
unsigned char mr_address[8];
};

struct fanout_args {

__u16 id;
__u16 type_flags;




__u32 max_num_members;
};
# 39 "./include/linux/skbuff.h" 2


# 1 "./include/net/page_pool.h" 1
# 34 "./include/net/page_pool.h"
# 1 "./include/linux/ptr_ring.h" 1
# 27 "./include/linux/ptr_ring.h"
# 1 "./arch/riscv/include/generated/uapi/asm/errno.h" 1
# 28 "./include/linux/ptr_ring.h" 2


struct ptr_ring {
int producer __attribute__((__aligned__((1 << 6))));
spinlock_t producer_lock;
int consumer_head __attribute__((__aligned__((1 << 6))));
int consumer_tail;
spinlock_t consumer_lock;


int size __attribute__((__aligned__((1 << 6))));
int batch;
void **queue;
};







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __ptr_ring_full(struct ptr_ring *r)
{
return r->queue[r->producer];
}
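/*
 * The ring keeps no explicit fill count: a slot is free iff it holds NULL,
 * so testing r->queue[r->producer] answers "full?" without touching any
 * consumer state.  One consequence is that NULL can never be stored as a
 * value.  The __aligned__((1 << 6)) members in struct ptr_ring keep the
 * producer and consumer indices on separate 64-byte cache lines for the
 * same reason.
 */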

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ptr_ring_full(struct ptr_ring *r)
{
bool ret;

spin_lock(&r->producer_lock);
ret = __ptr_ring_full(r);
spin_unlock(&r->producer_lock);

return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ptr_ring_full_irq(struct ptr_ring *r)
{
bool ret;

spin_lock_irq(&r->producer_lock);
ret = __ptr_ring_full(r);
spin_unlock_irq(&r->producer_lock);

return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ptr_ring_full_any(struct ptr_ring *r)
{
unsigned long flags;
bool ret;

do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&r->producer_lock)); } while (0); } while (0);
ret = __ptr_ring_full(r);
spin_unlock_irqrestore(&r->producer_lock, flags);

return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ptr_ring_full_bh(struct ptr_ring *r)
{
bool ret;

spin_lock_bh(&r->producer_lock);
ret = __ptr_ring_full(r);
spin_unlock_bh(&r->producer_lock);

return ret;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
if (__builtin_expect(!!(!r->size), 0) || r->queue[r->producer])
return -28;



do { do { } while (0); __asm__ __volatile__ ("fence " "w" "," "w" : : : "memory"); } while (0);

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_260(void) ; if (!((sizeof(r->queue[r->producer++]) == sizeof(char) || sizeof(r->queue[r->producer++]) == sizeof(short) || sizeof(r->queue[r->producer++]) == sizeof(int) || sizeof(r->queue[r->producer++]) == sizeof(long)) || sizeof(r->queue[r->producer++]) == sizeof(long long))) __compiletime_assert_260(); } while (0); do { *(volatile typeof(r->queue[r->producer++]) *)&(r->queue[r->producer++]) = (ptr); } while (0); } while (0);
if (__builtin_expect(!!(r->producer >= r->size), 0))
r->producer = 0;
return 0;
}
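/*
 * The "fence w,w" above is smp_wmb() on RISC-V: it publishes the pointed-to
 * data before the slot pointer becomes visible to the consumer.  The
 * do/while block that follows is WRITE_ONCE(r->queue[r->producer++], ptr);
 * the __compiletime_assert_260() branch merely checks that the access is a
 * 1-, 2-, 4- or 8-byte scalar.  -28 is -ENOSPC.
 */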






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
int ret;

spin_lock(&r->producer_lock);
ret = __ptr_ring_produce(r, ptr);
spin_unlock(&r->producer_lock);

return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr)
{
int ret;

spin_lock_irq(&r->producer_lock);
ret = __ptr_ring_produce(r, ptr);
spin_unlock_irq(&r->producer_lock);

return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ptr_ring_produce_any(struct ptr_ring *r, void *ptr)
{
unsigned long flags;
int ret;

do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&r->producer_lock)); } while (0); } while (0);
ret = __ptr_ring_produce(r, ptr);
spin_unlock_irqrestore(&r->producer_lock, flags);

return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
{
int ret;

spin_lock_bh(&r->producer_lock);
ret = __ptr_ring_produce(r, ptr);
spin_unlock_bh(&r->producer_lock);

return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *__ptr_ring_peek(struct ptr_ring *r)
{
if (__builtin_expect(!!(r->size), 1))
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_261(void) ; if (!((sizeof(r->queue[r->consumer_head]) == sizeof(char) || sizeof(r->queue[r->consumer_head]) == sizeof(short) || sizeof(r->queue[r->consumer_head]) == sizeof(int) || sizeof(r->queue[r->consumer_head]) == sizeof(long)) || sizeof(r->queue[r->consumer_head]) == sizeof(long long))) __compiletime_assert_261(); } while (0); (*(const volatile typeof( _Generic((r->queue[r->consumer_head]), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (r->queue[r->consumer_head]))) *)&(r->queue[r->consumer_head])); });
return ((void *)0);
}
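/*
 * Likewise, the _Generic expression above is just
 * READ_ONCE(r->queue[r->consumer_head]): a volatile load wrapped in a
 * compile-time size check.  Callers that dereference the result are
 * expected to hold the consumer lock; pairing with the producer's smp_wmb()
 * orders the data reads that follow.
 */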
# 194 "./include/linux/ptr_ring.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __ptr_ring_empty(struct ptr_ring *r)
{
if (__builtin_expect(!!(r->size), 1))
return !r->queue[({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_262(void) ; if (!((sizeof(r->consumer_head) == sizeof(char) || sizeof(r->consumer_head) == sizeof(short) || sizeof(r->consumer_head) == sizeof(int) || sizeof(r->consumer_head) == sizeof(long)) || sizeof(r->consumer_head) == sizeof(long long))) __compiletime_assert_262(); } while (0); (*(const volatile typeof( _Generic((r->consumer_head), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (r->consumer_head))) *)&(r->consumer_head)); })];
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ptr_ring_empty(struct ptr_ring *r)
{
bool ret;

spin_lock(&r->consumer_lock);
ret = __ptr_ring_empty(r);
spin_unlock(&r->consumer_lock);

return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ptr_ring_empty_irq(struct ptr_ring *r)
{
bool ret;

spin_lock_irq(&r->consumer_lock);
ret = __ptr_ring_empty(r);
spin_unlock_irq(&r->consumer_lock);

return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ptr_ring_empty_any(struct ptr_ring *r)
{
unsigned long flags;
bool ret;

do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&r->consumer_lock)); } while (0); } while (0);
ret = __ptr_ring_empty(r);
spin_unlock_irqrestore(&r->consumer_lock, flags);

return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ptr_ring_empty_bh(struct ptr_ring *r)
{
bool ret;

spin_lock_bh(&r->consumer_lock);
ret = __ptr_ring_empty(r);
spin_unlock_bh(&r->consumer_lock);

return ret;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __ptr_ring_discard_one(struct ptr_ring *r)
{
# 264 "./include/linux/ptr_ring.h"
int consumer_head = r->consumer_head;
int head = consumer_head++;






if (__builtin_expect(!!(consumer_head - r->consumer_tail >= r->batch || consumer_head >= r->size), 0)) {






while (__builtin_expect(!!(head >= r->consumer_tail), 1))
r->queue[head--] = ((void *)0);
r->consumer_tail = consumer_head;
}
if (__builtin_expect(!!(consumer_head >= r->size), 0)) {
consumer_head = 0;
r->consumer_tail = 0;
}

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_263(void) ; if (!((sizeof(r->consumer_head) == sizeof(char) || sizeof(r->consumer_head) == sizeof(short) || sizeof(r->consumer_head) == sizeof(int) || sizeof(r->consumer_head) == sizeof(long)) || sizeof(r->consumer_head) == sizeof(long long))) __compiletime_assert_263(); } while (0); do { *(volatile typeof(r->consumer_head) *)&(r->consumer_head) = (consumer_head); } while (0); } while (0);
}
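/*
 * Consumed slots are NULLed lazily: the consumer advances consumer_head on
 * every pop but only writes NULL back in batches of r->batch entries,
 * walking head backwards to consumer_tail.  Since the producer polls
 * r->queue[r->producer] to detect fullness, batching these stores keeps the
 * two sides from bouncing the same cache line on every single entry.
 */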

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *__ptr_ring_consume(struct ptr_ring *r)
{
void *ptr;





ptr = __ptr_ring_peek(r);
if (ptr)
__ptr_ring_discard_one(r);

return ptr;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __ptr_ring_consume_batched(struct ptr_ring *r,
void **array, int n)
{
void *ptr;
int i;

for (i = 0; i < n; i++) {
ptr = __ptr_ring_consume(r);
if (!ptr)
break;
array[i] = ptr;
}

return i;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *ptr_ring_consume(struct ptr_ring *r)
{
void *ptr;

spin_lock(&r->consumer_lock);
ptr = __ptr_ring_consume(r);
spin_unlock(&r->consumer_lock);

return ptr;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *ptr_ring_consume_irq(struct ptr_ring *r)
{
void *ptr;

spin_lock_irq(&r->consumer_lock);
ptr = __ptr_ring_consume(r);
spin_unlock_irq(&r->consumer_lock);

return ptr;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *ptr_ring_consume_any(struct ptr_ring *r)
{
unsigned long flags;
void *ptr;

do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&r->consumer_lock)); } while (0); } while (0);
ptr = __ptr_ring_consume(r);
spin_unlock_irqrestore(&r->consumer_lock, flags);

return ptr;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *ptr_ring_consume_bh(struct ptr_ring *r)
{
void *ptr;

spin_lock_bh(&r->consumer_lock);
ptr = __ptr_ring_consume(r);
spin_unlock_bh(&r->consumer_lock);

return ptr;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ptr_ring_consume_batched(struct ptr_ring *r,
void **array, int n)
{
int ret;

spin_lock(&r->consumer_lock);
ret = __ptr_ring_consume_batched(r, array, n);
spin_unlock(&r->consumer_lock);

return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ptr_ring_consume_batched_irq(struct ptr_ring *r,
void **array, int n)
{
int ret;

spin_lock_irq(&r->consumer_lock);
ret = __ptr_ring_consume_batched(r, array, n);
spin_unlock_irq(&r->consumer_lock);

return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ptr_ring_consume_batched_any(struct ptr_ring *r,
void **array, int n)
{
unsigned long flags;
int ret;

do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&r->consumer_lock)); } while (0); } while (0);
ret = __ptr_ring_consume_batched(r, array, n);
spin_unlock_irqrestore(&r->consumer_lock, flags);

return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ptr_ring_consume_batched_bh(struct ptr_ring *r,
void **array, int n)
{
int ret;

spin_lock_bh(&r->consumer_lock);
ret = __ptr_ring_consume_batched(r, array, n);
spin_unlock_bh(&r->consumer_lock);

return ret;
}
# 467 "./include/linux/ptr_ring.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
{
if (size > (1UL << (11 + (12) - 1)) / sizeof(void *))
return ((void *)0);
return kvmalloc_array(size, sizeof(void *), gfp | (( gfp_t)0x100u));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __ptr_ring_set_size(struct ptr_ring *r, int size)
{
r->size = size;
r->batch = (1 << 6) * 2 / sizeof(*(r->queue));





if (r->batch > r->size / 2 || !r->batch)
r->batch = 1;
}
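/*
 * The batch heuristic above sizes the lazy-NULLing run at two cache lines
 * worth of pointers ((1 << 6) * 2 / 8 = 16 slots on 64-bit), clamped to 1
 * for tiny rings, where a large batch would leave stale entries occupying
 * most of the queue.
 */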

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
{
r->queue = __ptr_ring_init_queue_alloc(size, gfp);
if (!r->queue)
return -12;

__ptr_ring_set_size(r, size);
r->producer = r->consumer_head = r->consumer_tail = 0;
do { static struct lock_class_key __key; __raw_spin_lock_init(spinlock_check(&r->producer_lock), "&r->producer_lock", &__key, LD_WAIT_CONFIG); } while (0);
do { static struct lock_class_key __key; __raw_spin_lock_init(spinlock_check(&r->consumer_lock), "&r->consumer_lock", &__key, LD_WAIT_CONFIG); } while (0);

return 0;
}
# 511 "./include/linux/ptr_ring.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
void (*destroy)(void *))
{
unsigned long flags;
int head;

do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&r->consumer_lock)); } while (0); } while (0);
spin_lock(&r->producer_lock);

if (!r->size)
goto done;





head = r->consumer_head - 1;
while (__builtin_expect(!!(head >= r->consumer_tail), 1))
r->queue[head--] = ((void *)0);
r->consumer_tail = r->consumer_head;





while (n) {
head = r->consumer_head - 1;
if (head < 0)
head = r->size - 1;
if (r->queue[head]) {

goto done;
}
r->queue[head] = batch[--n];
r->consumer_tail = head;

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_264(void) ; if (!((sizeof(r->consumer_head) == sizeof(char) || sizeof(r->consumer_head) == sizeof(short) || sizeof(r->consumer_head) == sizeof(int) || sizeof(r->consumer_head) == sizeof(long)) || sizeof(r->consumer_head) == sizeof(long long))) __compiletime_assert_264(); } while (0); do { *(volatile typeof(r->consumer_head) *)&(r->consumer_head) = (head); } while (0); } while (0);
}

done:

while (n)
destroy(batch[--n]);
spin_unlock(&r->producer_lock);
spin_unlock_irqrestore(&r->consumer_lock, flags);
}
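/*
 * Note the lock order used here and again in ptr_ring_resize() and
 * ptr_ring_resize_multiple(): consumer lock first (irqsave), then producer
 * lock, so both ends of the ring are quiesced while the queue contents are
 * rewritten.
 */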

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
int size, gfp_t gfp,
void (*destroy)(void *))
{
int producer = 0;
void **old;
void *ptr;

while ((ptr = __ptr_ring_consume(r)))
if (producer < size)
queue[producer++] = ptr;
else if (destroy)
destroy(ptr);

if (producer >= size)
producer = 0;
__ptr_ring_set_size(r, size);
r->producer = producer;
r->consumer_head = 0;
r->consumer_tail = 0;
old = r->queue;
r->queue = queue;

return old;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
void (*destroy)(void *))
{
unsigned long flags;
void **queue = __ptr_ring_init_queue_alloc(size, gfp);
void **old;

if (!queue)
return -12;

do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&(r)->consumer_lock)); } while (0); } while (0);
spin_lock(&(r)->producer_lock);

old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy);

spin_unlock(&(r)->producer_lock);
spin_unlock_irqrestore(&(r)->consumer_lock, flags);

kvfree(old);

return 0;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ptr_ring_resize_multiple(struct ptr_ring **rings,
unsigned int nrings,
int size,
gfp_t gfp, void (*destroy)(void *))
{
unsigned long flags;
void ***queues;
int i;

queues = kmalloc_array(nrings, sizeof(*queues), gfp);
if (!queues)
goto noqueues;

for (i = 0; i < nrings; ++i) {
queues[i] = __ptr_ring_init_queue_alloc(size, gfp);
if (!queues[i])
goto nomem;
}

for (i = 0; i < nrings; ++i) {
do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&(rings[i])->consumer_lock)); } while (0); } while (0);
spin_lock(&(rings[i])->producer_lock);
queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
size, gfp, destroy);
spin_unlock(&(rings[i])->producer_lock);
spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
}

for (i = 0; i < nrings; ++i)
kvfree(queues[i]);

kfree(queues);

return 0;

nomem:
while (--i >= 0)
kvfree(queues[i]);

kfree(queues);

noqueues:
return -12;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
{
void *ptr;

if (destroy)
while ((ptr = ptr_ring_consume(r)))
destroy(ptr);
kvfree(r->queue);
}
# 35 "./include/net/page_pool.h" 2
# 69 "./include/net/page_pool.h"
struct pp_alloc_cache {
u32 count;
struct page *cache[128];
};

struct page_pool_params {
unsigned int flags;
unsigned int order;
unsigned int pool_size;
int nid;
struct device *dev;
enum dma_data_direction dma_dir;
unsigned int max_len;
unsigned int offset;
void (*init_callback)(struct page *page, void *arg);
void *init_arg;
};
# 129 "./include/net/page_pool.h"
struct page_pool {
struct page_pool_params p;

struct delayed_work release_dw;
void (*disconnect)(void *);
unsigned long defer_start;
unsigned long defer_warn;

u32 pages_state_hold_cnt;
unsigned int frag_offset;
struct page *frag_page;
long frag_users;





u32 xdp_mem_id;
# 161 "./include/net/page_pool.h"
struct pp_alloc_cache alloc __attribute__((__aligned__((1 << 6))));
# 174 "./include/net/page_pool.h"
struct ptr_ring ring;





atomic_t pages_state_release_cnt;





refcount_t user_cnt;

u64 destroy_cnt;
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
gfp_t gfp = (((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)) | (( gfp_t)0x2000u));

return page_pool_alloc_pages(pool, gfp);
}

struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
unsigned int size, gfp_t gfp);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
unsigned int *offset,
unsigned int size)
{
gfp_t gfp = (((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)) | (( gfp_t)0x2000u));

return page_pool_alloc_frag(pool, offset, size, gfp);
}




static
inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
{
return pool->p.dma_dir;
}

bool page_pool_return_skb_page(struct page *page);

struct page_pool *page_pool_create(const struct page_pool_params *params);

struct xdp_mem_info;


void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
struct xdp_mem_info *mem);
void page_pool_release_page(struct page_pool *pool, struct page *page);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
int count);
# 255 "./include/net/page_pool.h"
void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
unsigned int dma_sync_size,
bool allow_direct);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_pool_fragment_page(struct page *page, long nr)
{
atomic_long_set(&page->pp_frag_count, nr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long page_pool_defrag_page(struct page *page, long nr)
{
long ret;
# 277 "./include/net/page_pool.h"
if (atomic_long_read(&page->pp_frag_count) == nr)
return 0;

ret = atomic_long_sub_return(nr, &page->pp_frag_count);
({ int __ret_warn_on = !!(ret < 0); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/page_pool.h"), "i" (281), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool page_pool_is_last_frag(struct page_pool *pool,
struct page *page)
{

return !(pool->p.flags & ((((1UL))) << (2))) ||
(page_pool_defrag_page(page, 1) == 0);
}
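/*
 * pp_frag_count tracks outstanding fragment users of a page_pool page.
 * page_pool_defrag_page() short-circuits when the caller holds all
 * remaining references (count == nr): the sole owner can observe that with
 * a plain atomic_long_read() and skip the atomic RMW.  In
 * page_pool_is_last_frag(), the (1UL << 2) pool flag is PP_FLAG_PAGE_FRAG
 * in this kernel, so pools without fragment support treat every put as the
 * last reference.
 */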

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_pool_put_page(struct page_pool *pool,
struct page *page,
unsigned int dma_sync_size,
bool allow_direct)
{




if (!page_pool_is_last_frag(pool, page))
return;

page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);

}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_pool_put_full_page(struct page_pool *pool,
struct page *page, bool allow_direct)
{
page_pool_put_page(pool, page, -1, allow_direct);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_pool_recycle_direct(struct page_pool *pool,
struct page *page)
{
page_pool_put_full_page(pool, page, true);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) dma_addr_t page_pool_get_dma_addr(struct page *page)
{
dma_addr_t ret = page->dma_addr;

if ((sizeof(dma_addr_t) > sizeof(unsigned long)))
ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;

return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
page->dma_addr = addr;
if ((sizeof(dma_addr_t) > sizeof(unsigned long)))
page->dma_addr_upper = ((u32)(((addr) >> 16) >> 16));
}
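/*
 * When dma_addr_t is wider than unsigned long (32-bit kernels with 64-bit
 * DMA), the address is split across page->dma_addr and page->dma_addr_upper.
 * The ">> 16 >> 16" idiom is upper_32_bits(): two half-shifts avoid an
 * undefined shift-by-32 when dma_addr_t is itself only 32 bits wide.  On
 * this riscv64 configuration both types are 64-bit, so the compiler folds
 * the branches away.
 */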

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_page_pool_compiled_in(void)
{

return true;



}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool page_pool_put(struct page_pool *pool)
{
return refcount_dec_and_test(&pool->user_cnt);
}


void page_pool_update_nid(struct page_pool *pool, int new_nid);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
if (__builtin_expect(!!(pool->p.nid != new_nid), 0))
page_pool_update_nid(pool, new_nid);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_pool_ring_lock(struct page_pool *pool)

{
if (((preempt_count() & (((1UL << (8))-1) << (0 + 8))) & (1UL << (0 + 8))))
spin_lock(&pool->ring.producer_lock);
else
spin_lock_bh(&pool->ring.producer_lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_pool_ring_unlock(struct page_pool *pool)

{
if (((preempt_count() & (((1UL << (8))-1) << (0 + 8))) & (1UL << (0 + 8))))
spin_unlock(&pool->ring.producer_lock);
else
spin_unlock_bh(&pool->ring.producer_lock);
}
# 42 "./include/linux/skbuff.h" 2
# 243 "./include/linux/skbuff.h"
struct ahash_request;
struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;
struct bpf_prog;
union bpf_attr;
struct skb_ext;
# 297 "./include/linux/skbuff.h"
struct sk_buff_head {

union { struct { struct sk_buff *next; struct sk_buff *prev; } ; struct sk_buff_list { struct sk_buff *next; struct sk_buff *prev; } list; };




__u32 qlen;
spinlock_t lock;
};

struct sk_buff;







enum skb_drop_reason {
SKB_NOT_DROPPED_YET = 0,
SKB_DROP_REASON_NOT_SPECIFIED,
SKB_DROP_REASON_NO_SOCKET,
SKB_DROP_REASON_PKT_TOO_SMALL,
SKB_DROP_REASON_TCP_CSUM,
SKB_DROP_REASON_SOCKET_FILTER,
SKB_DROP_REASON_UDP_CSUM,
SKB_DROP_REASON_NETFILTER_DROP,
SKB_DROP_REASON_OTHERHOST,



SKB_DROP_REASON_IP_CSUM,
SKB_DROP_REASON_IP_INHDR,



SKB_DROP_REASON_IP_RPFILTER,




SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST,



SKB_DROP_REASON_XFRM_POLICY,
SKB_DROP_REASON_IP_NOPROTO,
SKB_DROP_REASON_SOCKET_RCVBUFF,
SKB_DROP_REASON_PROTO_MEM,



SKB_DROP_REASON_TCP_MD5NOTFOUND,



SKB_DROP_REASON_TCP_MD5UNEXPECTED,



SKB_DROP_REASON_TCP_MD5FAILURE,



SKB_DROP_REASON_SOCKET_BACKLOG,



SKB_DROP_REASON_TCP_FLAGS,
SKB_DROP_REASON_TCP_ZEROWINDOW,


SKB_DROP_REASON_TCP_OLD_DATA,




SKB_DROP_REASON_TCP_OVERWINDOW,




SKB_DROP_REASON_TCP_OFOMERGE,



SKB_DROP_REASON_IP_OUTNOROUTES,
SKB_DROP_REASON_BPF_CGROUP_EGRESS,



SKB_DROP_REASON_IPV6DISABLED,
SKB_DROP_REASON_NEIGH_CREATEFAIL,


SKB_DROP_REASON_NEIGH_FAILED,
SKB_DROP_REASON_NEIGH_QUEUEFULL,


SKB_DROP_REASON_NEIGH_DEAD,
SKB_DROP_REASON_TC_EGRESS,
SKB_DROP_REASON_QDISC_DROP,



SKB_DROP_REASON_CPU_BACKLOG,





SKB_DROP_REASON_XDP,
SKB_DROP_REASON_TC_INGRESS,
SKB_DROP_REASON_PTYPE_ABSENT,




SKB_DROP_REASON_SKB_CSUM,


SKB_DROP_REASON_SKB_GSO_SEG,
SKB_DROP_REASON_SKB_UCOPY_FAULT,




SKB_DROP_REASON_DEV_HDR,






SKB_DROP_REASON_DEV_READY,
SKB_DROP_REASON_FULL_RING,
SKB_DROP_REASON_NOMEM,
SKB_DROP_REASON_HDR_TRUNC,




SKB_DROP_REASON_TAP_FILTER,



SKB_DROP_REASON_TAP_TXFILTER,


SKB_DROP_REASON_MAX,
};
# 462 "./include/linux/skbuff.h"
extern int sysctl_max_skb_frags;






typedef struct bio_vec skb_frag_t;





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int skb_frag_size(const skb_frag_t *frag)
{
return frag->bv_len;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
frag->bv_len = size;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_frag_size_add(skb_frag_t *frag, int delta)
{
frag->bv_len += delta;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
frag->bv_len -= delta;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_frag_must_loop(struct page *p)
{




return false;
}
# 566 "./include/linux/skbuff.h"
struct skb_shared_hwtstamps {
ktime_t hwtstamp;
};


enum {

SKBTX_HW_TSTAMP = 1 << 0,


SKBTX_SW_TSTAMP = 1 << 1,


SKBTX_IN_PROGRESS = 1 << 2,


SKBTX_WIFI_STATUS = 1 << 4,


SKBTX_SCHED_TSTAMP = 1 << 6,
};






enum {

SKBFL_ZEROCOPY_ENABLE = ((((1UL))) << (0)),






SKBFL_SHARED_FRAG = ((((1UL))) << (1)),




SKBFL_PURE_ZEROCOPY = ((((1UL))) << (2)),
};
# 621 "./include/linux/skbuff.h"
struct ubuf_info {
void (*callback)(struct sk_buff *, struct ubuf_info *,
bool zerocopy_success);
union {
struct {
unsigned long desc;
void *ctx;
};
struct {
u32 id;
u16 len;
u16 zerocopy:1;
u32 bytelen;
};
};
refcount_t refcnt;
u8 flags;

struct mmpin {
struct user_struct *user;
unsigned int num_pg;
} mmp;
};



int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);

struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size);
struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
struct ubuf_info *uarg);

void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);

void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
bool success);

int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
struct msghdr *msg, int len,
struct ubuf_info *uarg);




struct skb_shared_info {
__u8 flags;
__u8 meta_len;
__u8 nr_frags;
__u8 tx_flags;
unsigned short gso_size;

unsigned short gso_segs;
struct sk_buff *frag_list;
struct skb_shared_hwtstamps hwtstamps;
unsigned int gso_type;
u32 tskey;




atomic_t dataref;
unsigned int xdp_frags_size;



void * destructor_arg;


skb_frag_t frags[(65536/((1UL) << (12)) + 1)];
};
# 709 "./include/linux/skbuff.h"
enum {
SKB_FCLONE_UNAVAILABLE,
SKB_FCLONE_ORIG,
SKB_FCLONE_CLONE,
};

enum {
SKB_GSO_TCPV4 = 1 << 0,


SKB_GSO_DODGY = 1 << 1,


SKB_GSO_TCP_ECN = 1 << 2,

SKB_GSO_TCP_FIXEDID = 1 << 3,

SKB_GSO_TCPV6 = 1 << 4,

SKB_GSO_FCOE = 1 << 5,

SKB_GSO_GRE = 1 << 6,

SKB_GSO_GRE_CSUM = 1 << 7,

SKB_GSO_IPXIP4 = 1 << 8,

SKB_GSO_IPXIP6 = 1 << 9,

SKB_GSO_UDP_TUNNEL = 1 << 10,

SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,

SKB_GSO_PARTIAL = 1 << 12,

SKB_GSO_TUNNEL_REMCSUM = 1 << 13,

SKB_GSO_SCTP = 1 << 14,

SKB_GSO_ESP = 1 << 15,

SKB_GSO_UDP = 1 << 16,

SKB_GSO_UDP_L4 = 1 << 17,

SKB_GSO_FRAGLIST = 1 << 18,
};






typedef unsigned int sk_buff_data_t;
# 880 "./include/linux/skbuff.h"
struct sk_buff {
union {
struct {

struct sk_buff *next;
struct sk_buff *prev;

union {
struct net_device *dev;




unsigned long dev_scratch;
};
};
struct rb_node rbnode;
struct list_head list;
struct llist_node ll_node;
};

union {
struct sock *sk;
int ip_defrag_offset;
};

union {
ktime_t tstamp;
u64 skb_mstamp_ns;
};






char cb[48] __attribute__((__aligned__(8)));

union {
struct {
unsigned long _skb_refdst;
void (*destructor)(struct sk_buff *skb);
};
struct list_head tcp_tsorted_anchor;

unsigned long _sk_redir;

};




unsigned int len,
data_len;
__u16 mac_len,
hdr_len;




__u16 queue_mapping;
# 951 "./include/linux/skbuff.h"
__u8 __cloned_offset[0];

__u8 cloned:1,
nohdr:1,
fclone:2,
peeked:1,
head_frag:1,
pfmemalloc:1,
pp_recycle:1;







union { struct { __u8 __pkt_type_offset[0]; __u8 pkt_type:3; __u8 ignore_df:1; __u8 nf_trace:1; __u8 ip_summed:2; __u8 ooo_okay:1; __u8 l4_hash:1; __u8 sw_hash:1; __u8 wifi_acked_valid:1; __u8 wifi_acked:1; __u8 no_fcs:1; __u8 encapsulation:1; __u8 encap_hdr_csum:1; __u8 csum_valid:1; __u8 __pkt_vlan_present_offset[0]; __u8 vlan_present:1; __u8 csum_complete_sw:1; __u8 csum_level:2; __u8 dst_pending_confirm:1; __u8 mono_delivery_time:1; __u8 ndisc_nodetype:2; __u8 ipvs_property:1; __u8 inner_protocol_type:1; __u8 remcsum_offload:1; __u8 redirected:1; __u8 slow_gro:1; __u8 csum_not_inet:1; union { __wsum csum; struct { __u16 csum_start; __u16 csum_offset; }; }; __u32 priority; int skb_iif; __u32 hash; __be16 vlan_proto; __u16 vlan_tci; union { unsigned int napi_id; unsigned int sender_cpu; }; union { __u32 mark; __u32 reserved_tailroom; }; union { __be16 inner_protocol; __u8 inner_ipproto; }; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __be16 protocol; __u16 transport_header; __u16 network_header; __u16 mac_header; } ; struct { __u8 __pkt_type_offset[0]; __u8 pkt_type:3; __u8 ignore_df:1; __u8 nf_trace:1; __u8 ip_summed:2; __u8 ooo_okay:1; __u8 l4_hash:1; __u8 sw_hash:1; __u8 wifi_acked_valid:1; __u8 wifi_acked:1; __u8 no_fcs:1; __u8 encapsulation:1; __u8 encap_hdr_csum:1; __u8 csum_valid:1; __u8 __pkt_vlan_present_offset[0]; __u8 vlan_present:1; __u8 csum_complete_sw:1; __u8 csum_level:2; __u8 dst_pending_confirm:1; __u8 mono_delivery_time:1; __u8 ndisc_nodetype:2; __u8 ipvs_property:1; __u8 inner_protocol_type:1; __u8 remcsum_offload:1; __u8 redirected:1; __u8 slow_gro:1; __u8 csum_not_inet:1; union { __wsum csum; struct { __u16 csum_start; __u16 csum_offset; }; }; __u32 priority; int skb_iif; __u32 hash; __be16 vlan_proto; __u16 vlan_tci; union { unsigned int napi_id; unsigned int sender_cpu; }; union { __u32 mark; __u32 reserved_tailroom; }; union { __be16 inner_protocol; __u8 inner_ipproto; }; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; __be16 protocol; __u16 transport_header; __u16 network_header; __u16 mac_header; } headers; };
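/*
 * The duplicated member list above is not corruption: it is the expansion
 * of the kernel's struct_group(headers, ...) helper, a union of an
 * anonymous struct and an identically laid-out named struct.  Members stay
 * addressable directly (e.g. skb->mark) while skb->headers exposes the same
 * span as a single object, so code such as __copy_skb_header() can copy the
 * whole run of header fields at once.
 */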
# 1076 "./include/linux/skbuff.h"
sk_buff_data_t tail;
sk_buff_data_t end;
unsigned char *head,
*data;
unsigned int truesize;
refcount_t users;





};
# 1124 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_pfmemalloc(const struct sk_buff *skb)
{
return __builtin_expect(!!(skb->pfmemalloc), 0);
}
# 1142 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct dst_entry *skb_dst(const struct sk_buff *skb)
{



({ int __ret_warn_on = !!((skb->_skb_refdst & 1UL) && !rcu_read_lock_held() && !rcu_read_lock_bh_held()); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (1149), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });


return (struct dst_entry *)(skb->_skb_refdst & ~(1UL));
}
# 1161 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
skb->slow_gro |= !!dst;
skb->_skb_refdst = (unsigned long)dst;
}
# 1177 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
({ int __ret_warn_on = !!(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (1179), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
skb->slow_gro |= !!dst;
skb->_skb_refdst = (unsigned long)dst | 1UL;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_dst_is_noref(const struct sk_buff *skb)
{
return (skb->_skb_refdst & 1UL) && skb_dst(skb);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct rtable *skb_rtable(const struct sk_buff *skb)
{
return (struct rtable *)skb_dst(skb);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_pkt_type_ok(u32 ptype)
{
return ptype <= 3;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int skb_napi_id(const struct sk_buff *skb)
{

return skb->napi_id;



}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_unref(struct sk_buff *skb)
{
if (__builtin_expect(!!(!skb), 0))
return false;
if (__builtin_expect(!!(refcount_read(&skb->users) == 1), 1))
do { do { } while (0); __asm__ __volatile__ ("fence " "r" "," "r" : : : "memory"); } while (0);
else if (__builtin_expect(!!(!refcount_dec_and_test(&skb->users)), 1))
return false;

return true;
}
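/* Editor's note: the "fence r,r" asm above is the RISC-V expansion of
 * smp_rmb(); skb_unref() uses it on the fast path so the common
 * refcount == 1 case avoids an atomic read-modify-write entirely.
 */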

void kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason);





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kfree_skb(struct sk_buff *skb)
{
kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
}

void skb_release_head_state(struct sk_buff *skb);
void kfree_skb_list_reason(struct sk_buff *segs,
enum skb_drop_reason reason);
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
void skb_tx_error(struct sk_buff *skb);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kfree_skb_list(struct sk_buff *segs)
{
kfree_skb_list_reason(segs, SKB_DROP_REASON_NOT_SPECIFIED);
}


void consume_skb(struct sk_buff *skb);







void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
bool *fragstolen, int *delta_truesize);

struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb_around(struct sk_buff *skb,
void *data, unsigned int frag_size);

struct sk_buff *napi_build_skb(void *data, unsigned int frag_size);
# 1297 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *alloc_skb(unsigned int size,
gfp_t priority)
{
return __alloc_skb(size, priority, 0, (-1));
}

struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
unsigned long data_len,
int max_page_order,
int *errcode,
gfp_t gfp_mask);
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);


struct sk_buff_fclones {
struct sk_buff skb1;

struct sk_buff skb2;

refcount_t fclone_ref;
};
# 1328 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_fclone_busy(const struct sock *sk,
const struct sk_buff *skb)
{
const struct sk_buff_fclones *fclones;

fclones = ({ void *__mptr = (void *)(skb); _Static_assert(__builtin_types_compatible_p(typeof(*(skb)), typeof(((struct sk_buff_fclones *)0)->skb1)) || __builtin_types_compatible_p(typeof(*(skb)), typeof(void)), "pointer type mismatch in container_of()"); ((struct sk_buff_fclones *)(__mptr - __builtin_offsetof(struct sk_buff_fclones, skb1))); });

return skb->fclone == SKB_FCLONE_ORIG &&
refcount_read(&fclones->fclone_ref) > 1 &&
({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_265(void) ; if (!((sizeof(fclones->skb2.sk) == sizeof(char) || sizeof(fclones->skb2.sk) == sizeof(short) || sizeof(fclones->skb2.sk) == sizeof(int) || sizeof(fclones->skb2.sk) == sizeof(long)) || sizeof(fclones->skb2.sk) == sizeof(long long))) __compiletime_assert_265(); } while (0); (*(const volatile typeof( _Generic((fclones->skb2.sk), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (fclones->skb2.sk))) *)&(fclones->skb2.sk)); }) == sk;
}
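/* Editor's note: the `({ void *__mptr ... })` statement expression is
 * container_of(skb, struct sk_buff_fclones, skb1), and the
 * __compiletime_assert_265 block is READ_ONCE(fclones->skb2.sk).
 */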
# 1347 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *alloc_skb_fclone(unsigned int size,
gfp_t priority)
{
return __alloc_skb(size, priority, 0x01, (-1));
}

struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
void skb_headers_offset_update(struct sk_buff *skb, int off);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
gfp_t gfp_mask, bool fclone);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
gfp_t gfp_mask)
{
return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
}

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
unsigned int headroom);
struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
int newtailroom, gfp_t priority);
int __attribute__((__warn_unused_result__)) skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
int offset, int len);
int __attribute__((__warn_unused_result__)) skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
# 1391 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_pad(struct sk_buff *skb, int pad)
{
return __skb_pad(skb, pad, true);
}


int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
int offset, size_t size);

struct skb_seq_state {
__u32 lower_offset;
__u32 upper_offset;
__u32 frag_idx;
__u32 stepped_offset;
struct sk_buff *root_skb;
struct sk_buff *cur_skb;
__u8 *frag_data;
__u32 frag_off;
};

void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
unsigned int to, struct skb_seq_state *st);
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);

unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
unsigned int to, struct ts_config *config);
# 1446 "./include/linux/skbuff.h"
enum pkt_hash_types {
PKT_HASH_TYPE_NONE,
PKT_HASH_TYPE_L2,
PKT_HASH_TYPE_L3,
PKT_HASH_TYPE_L4,
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_clear_hash(struct sk_buff *skb)
{
skb->hash = 0;
skb->sw_hash = 0;
skb->l4_hash = 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
if (!skb->l4_hash)
skb_clear_hash(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
{
skb->l4_hash = is_l4;
skb->sw_hash = is_sw;
skb->hash = hash;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{

__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
{
__skb_set_hash(skb, hash, true, is_l4);
}
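/* Editor's note: in the hash helpers above, is_sw records that the hash
 * was computed in software rather than by the NIC, and is_l4 that it
 * covers the layer-4 tuple; skb_get_hash() further below computes the
 * flow hash lazily when neither bit is set.
 */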

void __skb_get_hash(struct sk_buff *skb);
u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
const struct flow_keys_basic *keys, int hlen);
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
const void *data, int hlen_proto);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be32 skb_flow_get_ports(const struct sk_buff *skb,
int thoff, u8 ip_proto)
{
return __skb_flow_get_ports(skb, thoff, ip_proto, ((void *)0), 0);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
const struct flow_dissector_key *key,
unsigned int key_count);

struct bpf_flow_dissector;
bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
__be16 proto, int nhoff, int hlen, unsigned int flags);

bool __skb_flow_dissect(const struct net *net,
const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container, const void *data,
__be16 proto, int nhoff, int hlen, unsigned int flags);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container, unsigned int flags)
{
return __skb_flow_dissect(((void *)0), skb, flow_dissector,
target_container, ((void *)0), 0, 0, 0, flags);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
struct flow_keys *flow,
unsigned int flags)
{
memset(flow, 0, sizeof(*flow));
return __skb_flow_dissect(((void *)0), skb, &flow_keys_dissector,
flow, ((void *)0), 0, 0, 0, flags);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
skb_flow_dissect_flow_keys_basic(const struct net *net,
const struct sk_buff *skb,
struct flow_keys_basic *flow,
const void *data, __be16 proto,
int nhoff, int hlen, unsigned int flags)
{
memset(flow, 0, sizeof(*flow));
return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
data, proto, nhoff, hlen, flags);
}

void skb_flow_dissect_meta(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container);





void
skb_flow_dissect_ct(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
u16 *ctinfo_map, size_t mapsize,
bool post_ct, u16 zone);
void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container);

void skb_flow_dissect_hash(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u32 skb_get_hash(struct sk_buff *skb)
{
if (!skb->l4_hash && !skb->sw_hash)
__skb_get_hash(skb);

return skb->hash;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
if (!skb->l4_hash && !skb->sw_hash) {
struct flow_keys keys;
__u32 hash = __get_hash_from_flowi6(fl6, &keys);

__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
}

return skb->hash;
}

__u32 skb_get_hash_perturb(const struct sk_buff *skb,
const siphash_key_t *perturb);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
return skb->hash;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
to->hash = from->hash;
to->sw_hash = from->sw_hash;
to->l4_hash = from->l4_hash;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_copy_decrypted(struct sk_buff *to,
const struct sk_buff *from)
{



}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
return skb->head + skb->end;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int skb_end_offset(const struct sk_buff *skb)
{
return skb->end;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
{
skb->end = offset;
}
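/* Editor's note: skb->end and skb->tail are offsets from skb->head here,
 * consistent with a CONFIG_NET_SKBUFF_DATA_USES_OFFSET=y build (the usual
 * choice on 64-bit), which is why skb_end_pointer() adds skb->end to
 * skb->head rather than returning a pointer field directly.
 */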
# 1645 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
return &((struct skb_shared_info *)(skb_end_pointer(skb)))->hwtstamps;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct ubuf_info *skb_zcopy(struct sk_buff *skb)
{
bool is_zcopy = skb && ((struct skb_shared_info *)(skb_end_pointer(skb)))->flags & SKBFL_ZEROCOPY_ENABLE;

return is_zcopy ? ((struct ubuf_info *)(((struct skb_shared_info *)(skb_end_pointer(skb)))->destructor_arg)) : ((void *)0);
}
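/* Editor's note: the recurring cast
 * ((struct skb_shared_info *)(skb_end_pointer(skb))) is the expansion of
 * the skb_shinfo(skb) macro; the shared info block sits immediately after
 * the linear data area.
 */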

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_zcopy_pure(const struct sk_buff *skb)
{
return ((struct skb_shared_info *)(skb_end_pointer(skb)))->flags & SKBFL_PURE_ZEROCOPY;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_pure_zcopy_same(const struct sk_buff *skb1,
const struct sk_buff *skb2)
{
return skb_zcopy_pure(skb1) == skb_zcopy_pure(skb2);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void net_zcopy_get(struct ubuf_info *uarg)
{
refcount_inc(&uarg->refcnt);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_zcopy_init(struct sk_buff *skb, struct ubuf_info *uarg)
{
((struct skb_shared_info *)(skb_end_pointer(skb)))->destructor_arg = uarg;
((struct skb_shared_info *)(skb_end_pointer(skb)))->flags |= uarg->flags;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
bool *have_ref)
{
if (skb && uarg && !skb_zcopy(skb)) {
if (__builtin_expect(!!(have_ref && *have_ref), 0))
*have_ref = false;
else
net_zcopy_get(uarg);
skb_zcopy_init(skb, uarg);
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
{
((struct skb_shared_info *)(skb_end_pointer(skb)))->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
((struct skb_shared_info *)(skb_end_pointer(skb)))->flags |= (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_zcopy_is_nouarg(struct sk_buff *skb)
{
return (uintptr_t) ((struct skb_shared_info *)(skb_end_pointer(skb)))->destructor_arg & 0x1UL;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *skb_zcopy_get_nouarg(struct sk_buff *skb)
{
return (void *)((uintptr_t) ((struct skb_shared_info *)(skb_end_pointer(skb)))->destructor_arg & ~0x1UL);
}
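/* Editor's note: the "nouarg" helpers reuse bit 0 of
 * skb_shinfo(skb)->destructor_arg as a flag: set_nouarg() stores a tagged
 * pointer, is_nouarg() tests the tag, and get_nouarg() masks it back off,
 * which lets skb_zcopy_clear() below skip the uarg callback for such skbs.
 */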

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void net_zcopy_put(struct ubuf_info *uarg)
{
if (uarg)
uarg->callback(((void *)0), uarg, true);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
if (uarg) {
if (uarg->callback == msg_zerocopy_callback)
msg_zerocopy_put_abort(uarg, have_uref);
else if (have_uref)
net_zcopy_put(uarg);
}
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
{
struct ubuf_info *uarg = skb_zcopy(skb);

if (uarg) {
if (!skb_zcopy_is_nouarg(skb))
uarg->callback(skb, uarg, zerocopy_success);

((struct skb_shared_info *)(skb_end_pointer(skb)))->flags &= ~((SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG) | SKBFL_PURE_ZEROCOPY);
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_mark_not_on_list(struct sk_buff *skb)
{
skb->next = ((void *)0);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_list_del_init(struct sk_buff *skb)
{
__list_del_entry(&skb->list);
skb_mark_not_on_list(skb);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_queue_empty(const struct sk_buff_head *list)
{
return list->next == (const struct sk_buff *) list;
}
# 1770 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_queue_empty_lockless(const struct sk_buff_head *list)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_266(void) ; if (!((sizeof(list->next) == sizeof(char) || sizeof(list->next) == sizeof(short) || sizeof(list->next) == sizeof(int) || sizeof(list->next) == sizeof(long)) || sizeof(list->next) == sizeof(long long))) __compiletime_assert_266(); } while (0); (*(const volatile typeof( _Generic((list->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (list->next))) *)&(list->next)); }) == (const struct sk_buff *) list;
}
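/* Editor's note: the __compiletime_assert_266 statement expression is
 * READ_ONCE(list->next): a single non-torn load, so this check can safely
 * race with writers, unlike skb_queue_empty() above which needs the queue
 * to be protected by a lock.
 */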
# 1783 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_queue_is_last(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
return skb->next == (const struct sk_buff *) list;
}
# 1796 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_queue_is_first(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
return skb->prev == (const struct sk_buff *) list;
}
# 1810 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
const struct sk_buff *skb)
{



do { if (__builtin_expect(!!(skb_queue_is_last(list, skb)), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (1816), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
return skb->next;
}
# 1828 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
const struct sk_buff *skb)
{



do { if (__builtin_expect(!!(skb_queue_is_first(list, skb)), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (1834), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
return skb->prev;
}
# 1845 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *skb_get(struct sk_buff *skb)
{
refcount_inc(&skb->users);
return skb;
}
# 1863 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_cloned(const struct sk_buff *skb)
{
return skb->cloned &&
(atomic_read(&((struct skb_shared_info *)(skb_end_pointer(skb)))->dataref) & ((1 << 16) - 1)) != 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
do { if (gfpflags_allow_blocking(pri)) do { __might_sleep("include/linux/skbuff.h", 1871); __cond_resched(); } while (0); } while (0);

if (skb_cloned(skb))
return pskb_expand_head(skb, 0, 0, pri);

return 0;
}







int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
{
do { if (gfpflags_allow_blocking(pri)) do { __might_sleep("include/linux/skbuff.h", 1888); __cond_resched(); } while (0); } while (0);

if (skb_cloned(skb))
return __skb_unclone_keeptruesize(skb, pri);
return 0;
}
# 1902 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_header_cloned(const struct sk_buff *skb)
{
int dataref;

if (!skb->cloned)
return 0;

dataref = atomic_read(&((struct skb_shared_info *)(skb_end_pointer(skb)))->dataref);
dataref = (dataref & ((1 << 16) - 1)) - (dataref >> 16);
return dataref != 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
{
do { if (gfpflags_allow_blocking(pri)) do { __might_sleep("include/linux/skbuff.h", 1916); __cond_resched(); } while (0); } while (0);

if (skb_header_cloned(skb))
return pskb_expand_head(skb, 0, 0, pri);

return 0;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_header_release(struct sk_buff *skb)
{
skb->nohdr = 1;
atomic_set(&((struct skb_shared_info *)(skb_end_pointer(skb)))->dataref, 1 + (1 << 16));
}
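/* Editor's note: shinfo->dataref is split into two 16-bit halves: the low
 * 16 bits count references to the entire skb->data, the high 16 bits count
 * payload-only (header-released) references. skb_header_cloned() above
 * subtracts one half from the other, and __skb_header_release() seeds both
 * halves with 1 + (1 << 16).
 */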
# 1942 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_shared(const struct sk_buff *skb)
{
return refcount_read(&skb->users) != 1;
}
# 1960 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
do { if (gfpflags_allow_blocking(pri)) do { __might_sleep("include/linux/skbuff.h", 1962); __cond_resched(); } while (0); } while (0);
if (skb_shared(skb)) {
struct sk_buff *nskb = skb_clone(skb, pri);

if (__builtin_expect(!!(nskb), 1))
consume_skb(skb);
else
kfree_skb(skb);
skb = nskb;
}
return skb;
}
# 1995 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *skb_unshare(struct sk_buff *skb,
gfp_t pri)
{
do { if (gfpflags_allow_blocking(pri)) do { __might_sleep("include/linux/skbuff.h", 1998); __cond_resched(); } while (0); } while (0);
if (skb_cloned(skb)) {
struct sk_buff *nskb = skb_copy(skb, pri);


if (__builtin_expect(!!(nskb), 1))
consume_skb(skb);
else
kfree_skb(skb);
skb = nskb;
}
return skb;
}
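/* Editor's note: skb_share_check() handles skb->users > 1 by cloning and
 * dropping the caller's reference, while skb_unshare() handles a cloned
 * skb by making a private copy of the data; both free the skb and return
 * NULL on allocation failure.
 */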
# 2025 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
struct sk_buff *skb = list_->next;

if (skb == (struct sk_buff *)list_)
skb = ((void *)0);
return skb;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
{
return list_->next;
}
# 2054 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *skb_peek_next(struct sk_buff *skb,
const struct sk_buff_head *list_)
{
struct sk_buff *next = skb->next;

if (next == (struct sk_buff *)list_)
next = ((void *)0);
return next;
}
# 2077 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
struct sk_buff *skb = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_267(void) ; if (!((sizeof(list_->prev) == sizeof(char) || sizeof(list_->prev) == sizeof(short) || sizeof(list_->prev) == sizeof(int) || sizeof(list_->prev) == sizeof(long)) || sizeof(list_->prev) == sizeof(long long))) __compiletime_assert_267(); } while (0); (*(const volatile typeof( _Generic((list_->prev), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (list_->prev))) *)&(list_->prev)); });

if (skb == (struct sk_buff *)list_)
skb = ((void *)0);
return skb;

}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u32 skb_queue_len(const struct sk_buff_head *list_)
{
return list_->qlen;
}
# 2105 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_268(void) ; if (!((sizeof(list_->qlen) == sizeof(char) || sizeof(list_->qlen) == sizeof(short) || sizeof(list_->qlen) == sizeof(int) || sizeof(list_->qlen) == sizeof(long)) || sizeof(list_->qlen) == sizeof(long long))) __compiletime_assert_268(); } while (0); (*(const volatile typeof( _Generic((list_->qlen), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (list_->qlen))) *)&(list_->qlen)); });
}
# 2120 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_queue_head_init(struct sk_buff_head *list)
{
list->prev = list->next = (struct sk_buff *)list;
list->qlen = 0;
}
# 2134 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_queue_head_init(struct sk_buff_head *list)
{
do { static struct lock_class_key __key; __raw_spin_lock_init(spinlock_check(&list->lock), "&list->lock", &__key, LD_WAIT_CONFIG); } while (0);
__skb_queue_head_init(list);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_queue_head_init_class(struct sk_buff_head *list,
struct lock_class_key *class)
{
skb_queue_head_init(list);
lockdep_init_map_waits(&(&list->lock)->dep_map, "class", class, 0, (&list->lock)->dep_map.wait_type_inner, (&list->lock)->dep_map.wait_type_outer);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
{



do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_269(void) ; if (!((sizeof(newsk->next) == sizeof(char) || sizeof(newsk->next) == sizeof(short) || sizeof(newsk->next) == sizeof(int) || sizeof(newsk->next) == sizeof(long)) || sizeof(newsk->next) == sizeof(long long))) __compiletime_assert_269(); } while (0); do { *(volatile typeof(newsk->next) *)&(newsk->next) = (next); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_270(void) ; if (!((sizeof(newsk->prev) == sizeof(char) || sizeof(newsk->prev) == sizeof(short) || sizeof(newsk->prev) == sizeof(int) || sizeof(newsk->prev) == sizeof(long)) || sizeof(newsk->prev) == sizeof(long long))) __compiletime_assert_270(); } while (0); do { *(volatile typeof(newsk->prev) *)&(newsk->prev) = (prev); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_271(void) ; if (!((sizeof(((struct sk_buff_list *)next)->prev) == sizeof(char) || sizeof(((struct sk_buff_list *)next)->prev) == sizeof(short) || sizeof(((struct sk_buff_list *)next)->prev) == sizeof(int) || sizeof(((struct sk_buff_list *)next)->prev) == sizeof(long)) || sizeof(((struct sk_buff_list *)next)->prev) == sizeof(long long))) __compiletime_assert_271(); } while (0); do { *(volatile typeof(((struct sk_buff_list *)next)->prev) *)&(((struct sk_buff_list *)next)->prev) = (newsk); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_272(void) ; if (!((sizeof(((struct sk_buff_list *)prev)->next) == sizeof(char) || sizeof(((struct sk_buff_list *)prev)->next) == sizeof(short) || sizeof(((struct sk_buff_list *)prev)->next) == sizeof(int) || sizeof(((struct sk_buff_list *)prev)->next) == sizeof(long)) || sizeof(((struct sk_buff_list *)prev)->next) == sizeof(long long))) __compiletime_assert_272(); } while (0); do { *(volatile typeof(((struct sk_buff_list *)prev)->next) *)&(((struct sk_buff_list *)prev)->next) = (newsk); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_273(void) ; if (!((sizeof(list->qlen) == sizeof(char) || sizeof(list->qlen) == sizeof(short) || sizeof(list->qlen) == sizeof(int) || sizeof(list->qlen) == sizeof(long)) || sizeof(list->qlen) == sizeof(long long))) __compiletime_assert_273(); } while (0); do { *(volatile typeof(list->qlen) *)&(list->qlen) = (list->qlen + 1); } while (0); } while (0);
}
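/* Editor's note: each do { ... __compiletime_assert_NNN ... } block in
 * __skb_insert() above and __skb_queue_splice() below is a WRITE_ONCE()
 * store, e.g. WRITE_ONCE(newsk->next, next), so lockless readers such as
 * skb_queue_empty_lockless() never observe torn pointer updates.
 */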

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_queue_splice(const struct sk_buff_head *list,
struct sk_buff *prev,
struct sk_buff *next)
{
struct sk_buff *first = list->next;
struct sk_buff *last = list->prev;

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_274(void) ; if (!((sizeof(first->prev) == sizeof(char) || sizeof(first->prev) == sizeof(short) || sizeof(first->prev) == sizeof(int) || sizeof(first->prev) == sizeof(long)) || sizeof(first->prev) == sizeof(long long))) __compiletime_assert_274(); } while (0); do { *(volatile typeof(first->prev) *)&(first->prev) = (prev); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_275(void) ; if (!((sizeof(prev->next) == sizeof(char) || sizeof(prev->next) == sizeof(short) || sizeof(prev->next) == sizeof(int) || sizeof(prev->next) == sizeof(long)) || sizeof(prev->next) == sizeof(long long))) __compiletime_assert_275(); } while (0); do { *(volatile typeof(prev->next) *)&(prev->next) = (first); } while (0); } while (0);

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_276(void) ; if (!((sizeof(last->next) == sizeof(char) || sizeof(last->next) == sizeof(short) || sizeof(last->next) == sizeof(int) || sizeof(last->next) == sizeof(long)) || sizeof(last->next) == sizeof(long long))) __compiletime_assert_276(); } while (0); do { *(volatile typeof(last->next) *)&(last->next) = (next); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_277(void) ; if (!((sizeof(next->prev) == sizeof(char) || sizeof(next->prev) == sizeof(short) || sizeof(next->prev) == sizeof(int) || sizeof(next->prev) == sizeof(long)) || sizeof(next->prev) == sizeof(long long))) __compiletime_assert_277(); } while (0); do { *(volatile typeof(next->prev) *)&(next->prev) = (last); } while (0); } while (0);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_queue_splice(const struct sk_buff_head *list,
struct sk_buff_head *head)
{
if (!skb_queue_empty(list)) {
__skb_queue_splice(list, (struct sk_buff *) head, head->next);
head->qlen += list->qlen;
}
}
# 2202 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_queue_splice_init(struct sk_buff_head *list,
struct sk_buff_head *head)
{
if (!skb_queue_empty(list)) {
__skb_queue_splice(list, (struct sk_buff *) head, head->next);
head->qlen += list->qlen;
__skb_queue_head_init(list);
}
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_queue_splice_tail(const struct sk_buff_head *list,
struct sk_buff_head *head)
{
if (!skb_queue_empty(list)) {
__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
head->qlen += list->qlen;
}
}
# 2234 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_queue_splice_tail_init(struct sk_buff_head *list,
struct sk_buff_head *head)
{
if (!skb_queue_empty(list)) {
__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
head->qlen += list->qlen;
__skb_queue_head_init(list);
}
}
# 2255 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_queue_after(struct sk_buff_head *list,
struct sk_buff *prev,
struct sk_buff *newsk)
{
__skb_insert(newsk, prev, ((struct sk_buff_list *)prev)->next, list);
}

void skb_append(struct sk_buff *old, struct sk_buff *newsk,
struct sk_buff_head *list);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_queue_before(struct sk_buff_head *list,
struct sk_buff *next,
struct sk_buff *newsk)
{
__skb_insert(newsk, ((struct sk_buff_list *)next)->prev, next, list);
}
# 2282 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_queue_head(struct sk_buff_head *list,
struct sk_buff *newsk)
{
__skb_queue_after(list, (struct sk_buff *)list, newsk);
}
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
# 2299 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_queue_tail(struct sk_buff_head *list,
struct sk_buff *newsk)
{
__skb_queue_before(list, (struct sk_buff *)list, newsk);
}
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);





void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
struct sk_buff *next, *prev;

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_278(void) ; if (!((sizeof(list->qlen) == sizeof(char) || sizeof(list->qlen) == sizeof(short) || sizeof(list->qlen) == sizeof(int) || sizeof(list->qlen) == sizeof(long)) || sizeof(list->qlen) == sizeof(long long))) __compiletime_assert_278(); } while (0); do { *(volatile typeof(list->qlen) *)&(list->qlen) = (list->qlen - 1); } while (0); } while (0);
next = skb->next;
prev = skb->prev;
skb->next = skb->prev = ((void *)0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_279(void) ; if (!((sizeof(next->prev) == sizeof(char) || sizeof(next->prev) == sizeof(short) || sizeof(next->prev) == sizeof(int) || sizeof(next->prev) == sizeof(long)) || sizeof(next->prev) == sizeof(long long))) __compiletime_assert_279(); } while (0); do { *(volatile typeof(next->prev) *)&(next->prev) = (prev); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_280(void) ; if (!((sizeof(prev->next) == sizeof(char) || sizeof(prev->next) == sizeof(short) || sizeof(prev->next) == sizeof(int) || sizeof(prev->next) == sizeof(long)) || sizeof(prev->next) == sizeof(long long))) __compiletime_assert_280(); } while (0); do { *(volatile typeof(prev->next) *)&(prev->next) = (next); } while (0); } while (0);
}
# 2331 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
struct sk_buff *skb = skb_peek(list);
if (skb)
__skb_unlink(skb, list);
return skb;
}
struct sk_buff *skb_dequeue(struct sk_buff_head *list);
# 2348 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
struct sk_buff *skb = skb_peek_tail(list);
if (skb)
__skb_unlink(skb, list);
return skb;
}
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_is_nonlinear(const struct sk_buff *skb)
{
return skb->data_len;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int skb_headlen(const struct sk_buff *skb)
{
return skb->len - skb->data_len;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int __skb_pagelen(const struct sk_buff *skb)
{
unsigned int i, len = 0;

for (i = ((struct skb_shared_info *)(skb_end_pointer(skb)))->nr_frags - 1; (int)i >= 0; i--)
len += skb_frag_size(&((struct skb_shared_info *)(skb_end_pointer(skb)))->frags[i]);
return len;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int skb_pagelen(const struct sk_buff *skb)
{
return skb_headlen(skb) + __skb_pagelen(skb);
}
# 2395 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_fill_page_desc(struct sk_buff *skb, int i,
struct page *page, int off, int size)
{
skb_frag_t *frag = &((struct skb_shared_info *)(skb_end_pointer(skb)))->frags[i];






frag->bv_page = page;
frag->bv_offset = off;
skb_frag_size_set(frag, size);

page = ((typeof(page))_compound_head(page));
if (page_is_pfmemalloc(page))
skb->pfmemalloc = true;
}
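/* Editor's note: frag->bv_page/bv_offset reflect skb_frag_t being a
 * struct bio_vec, and ((typeof(page))_compound_head(page)) is the
 * expansion of compound_head(); the page_is_pfmemalloc() check propagates
 * the emergency-reserve flag from the page to the skb.
 */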
# 2428 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_fill_page_desc(struct sk_buff *skb, int i,
struct page *page, int off, int size)
{
__skb_fill_page_desc(skb, i, page, off, size);
((struct skb_shared_info *)(skb_end_pointer(skb)))->nr_frags = i + 1;
}

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
int size, unsigned int truesize);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
unsigned int truesize);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
return skb->head + skb->tail;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_reset_tail_pointer(struct sk_buff *skb)
{
skb->tail = skb->data - skb->head;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
skb_reset_tail_pointer(skb);
skb->tail += offset;
}
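/* Editor's note: the buffer invariant is head <= data <= tail <= end, with
 * headroom = data - head and tailroom = end - tail; the put/push/pull
 * helpers that follow only move the data and tail offsets within those
 * bounds (the ebreak BUG asm enforces the linear-data-only cases).
 */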
# 2481 "./include/linux/skbuff.h"
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
void *skb_put(struct sk_buff *skb, unsigned int len);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *__skb_put(struct sk_buff *skb, unsigned int len)
{
void *tmp = skb_tail_pointer(skb);
do { if (__builtin_expect(!!(skb_is_nonlinear(skb)), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (2486), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
skb->tail += len;
skb->len += len;
return tmp;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
{
void *tmp = __skb_put(skb, len);

memset(tmp, 0, len);
return tmp;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *__skb_put_data(struct sk_buff *skb, const void *data,
unsigned int len)
{
void *tmp = __skb_put(skb, len);

memcpy(tmp, data, len);
return tmp;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_put_u8(struct sk_buff *skb, u8 val)
{
*(u8 *)__skb_put(skb, 1) = val;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
void *tmp = skb_put(skb, len);

memset(tmp, 0, len);

return tmp;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *skb_put_data(struct sk_buff *skb, const void *data,
unsigned int len)
{
void *tmp = skb_put(skb, len);

memcpy(tmp, data, len);

return tmp;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_put_u8(struct sk_buff *skb, u8 val)
{
*(u8 *)skb_put(skb, 1) = val;
}

void *skb_push(struct sk_buff *skb, unsigned int len);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *__skb_push(struct sk_buff *skb, unsigned int len)
{
skb->data -= len;
skb->len += len;
return skb->data;
}

void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
skb->len -= len;
do { if (__builtin_expect(!!(skb->len < skb->data_len), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (2550), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
return skb->data += len;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
return __builtin_expect(!!(len > skb->len), 0) ? ((void *)0) : __skb_pull(skb, len);
}

void *skb_pull_data(struct sk_buff *skb, size_t len);

void *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
if (len > skb_headlen(skb) &&
!__pskb_pull_tail(skb, len - skb_headlen(skb)))
return ((void *)0);
skb->len -= len;
return skb->data += len;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *pskb_pull(struct sk_buff *skb, unsigned int len)
{
return __builtin_expect(!!(len > skb->len), 0) ? ((void *)0) : __pskb_pull(skb, len);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
if (__builtin_expect(!!(len <= skb_headlen(skb)), 1))
return true;
if (__builtin_expect(!!(len > skb->len), 0))
return false;
return __pskb_pull_tail(skb, len - skb_headlen(skb)) != ((void *)0);
}
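/* Editor's note: pskb_may_pull() guarantees the first len bytes are linear
 * (in the skb head), calling __pskb_pull_tail() to copy data in from the
 * fragments if necessary, and returns false rather than pulling beyond
 * skb->len.
 */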

void skb_condense(struct sk_buff *skb);







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int skb_headroom(const struct sk_buff *skb)
{
return skb->data - skb->head;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_tailroom(const struct sk_buff *skb)
{
return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}
# 2617 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_availroom(const struct sk_buff *skb)
{
if (skb_is_nonlinear(skb))
return 0;

return skb->end - skb->tail - skb->reserved_tailroom;
}
# 2633 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_reserve(struct sk_buff *skb, int len)
{
skb->data += len;
skb->tail += len;
}
# 2651 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
unsigned int needed_tailroom)
{
do { if (__builtin_expect(!!(skb_is_nonlinear(skb)), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (2654), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
if (mtu < skb_tailroom(skb) - needed_tailroom)

skb->reserved_tailroom = skb_tailroom(skb) - mtu;
else

skb->reserved_tailroom = needed_tailroom;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_set_inner_protocol(struct sk_buff *skb,
__be16 protocol)
{
skb->inner_protocol = protocol;
skb->inner_protocol_type = 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_set_inner_ipproto(struct sk_buff *skb,
__u8 ipproto)
{
skb->inner_ipproto = ipproto;
skb->inner_protocol_type = 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_reset_inner_headers(struct sk_buff *skb)
{
skb->inner_mac_header = skb->mac_header;
skb->inner_network_header = skb->network_header;
skb->inner_transport_header = skb->transport_header;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_reset_mac_len(struct sk_buff *skb)
{
skb->mac_len = skb->network_header - skb->mac_header;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned char *skb_inner_transport_header(const struct sk_buff
*skb)
{
return skb->head + skb->inner_transport_header;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_inner_transport_offset(const struct sk_buff *skb)
{
return skb_inner_transport_header(skb) - skb->data;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_reset_inner_transport_header(struct sk_buff *skb)
{
skb->inner_transport_header = skb->data - skb->head;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_set_inner_transport_header(struct sk_buff *skb,
const int offset)
{
skb_reset_inner_transport_header(skb);
skb->inner_transport_header += offset;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
return skb->head + skb->inner_network_header;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_reset_inner_network_header(struct sk_buff *skb)
{
skb->inner_network_header = skb->data - skb->head;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_set_inner_network_header(struct sk_buff *skb,
const int offset)
{
skb_reset_inner_network_header(skb);
skb->inner_network_header += offset;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
return skb->head + skb->inner_mac_header;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_reset_inner_mac_header(struct sk_buff *skb)
{
skb->inner_mac_header = skb->data - skb->head;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_set_inner_mac_header(struct sk_buff *skb,
const int offset)
{
skb_reset_inner_mac_header(skb);
skb->inner_mac_header += offset;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_transport_header_was_set(const struct sk_buff *skb)
{
return skb->transport_header != (typeof(skb->transport_header))~0U;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned char *skb_transport_header(const struct sk_buff *skb)
{
return skb->head + skb->transport_header;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_reset_transport_header(struct sk_buff *skb)
{
skb->transport_header = skb->data - skb->head;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_set_transport_header(struct sk_buff *skb,
const int offset)
{
skb_reset_transport_header(skb);
skb->transport_header += offset;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned char *skb_network_header(const struct sk_buff *skb)
{
return skb->head + skb->network_header;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_reset_network_header(struct sk_buff *skb)
{
skb->network_header = skb->data - skb->head;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_set_network_header(struct sk_buff *skb, const int offset)
{
skb_reset_network_header(skb);
skb->network_header += offset;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned char *skb_mac_header(const struct sk_buff *skb)
{
return skb->head + skb->mac_header;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_mac_offset(const struct sk_buff *skb)
{
return skb_mac_header(skb) - skb->data;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 skb_mac_header_len(const struct sk_buff *skb)
{
return skb->network_header - skb->mac_header;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_mac_header_was_set(const struct sk_buff *skb)
{
return skb->mac_header != (typeof(skb->mac_header))~0U;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_unset_mac_header(struct sk_buff *skb)
{
skb->mac_header = (typeof(skb->mac_header))~0U;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_reset_mac_header(struct sk_buff *skb)
{
skb->mac_header = skb->data - skb->head;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
skb_reset_mac_header(skb);
skb->mac_header += offset;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_pop_mac_header(struct sk_buff *skb)
{
skb->mac_header = skb->network_header;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_probe_transport_header(struct sk_buff *skb)
{
struct flow_keys_basic keys;

if (skb_transport_header_was_set(skb))
return;

if (skb_flow_dissect_flow_keys_basic(((void *)0), skb, &keys,
((void *)0), 0, 0, 0, 0))
skb_set_transport_header(skb, keys.control.thoff);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_mac_header_rebuild(struct sk_buff *skb)
{
if (skb_mac_header_was_set(skb)) {
const unsigned char *old_mac = skb_mac_header(skb);

skb_set_mac_header(skb, -skb->mac_len);
memmove(skb_mac_header(skb), old_mac, skb->mac_len);
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_checksum_start_offset(const struct sk_buff *skb)
{
return skb->csum_start - skb_headroom(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
return skb->head + skb->csum_start;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_transport_offset(const struct sk_buff *skb)
{
return skb_transport_header(skb) - skb->data;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 skb_network_header_len(const struct sk_buff *skb)
{
return skb->transport_header - skb->network_header;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
return skb->inner_transport_header - skb->inner_network_header;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_network_offset(const struct sk_buff *skb)
{
return skb_network_header(skb) - skb->data;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_inner_network_offset(const struct sk_buff *skb)
{
return skb_inner_network_header(skb) - skb->data;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
return pskb_may_pull(skb, skb_network_offset(skb) + len);
}
# 2937 "./include/linux/skbuff.h"
int ___pskb_trim(struct sk_buff *skb, unsigned int len);

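/*
 * The ({ ... "ebreak" ... __bug_table ... }) statement expression below
 * is the RISC-V expansion of WARN_ON(skb_is_nonlinear(skb)): an ebreak
 * trap plus a __bug_table entry recording the warning's file and line.
 */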
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
if (({ int __ret_warn_on = !!(skb_is_nonlinear(skb)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (2941), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }))
return;
skb->len = len;
skb_set_tail_pointer(skb, len);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_trim(struct sk_buff *skb, unsigned int len)
{
__skb_set_length(skb, len);
}

void skb_trim(struct sk_buff *skb, unsigned int len);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
if (skb->data_len)
return ___pskb_trim(skb, len);
__skb_trim(skb, len);
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pskb_trim(struct sk_buff *skb, unsigned int len)
{
return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}
# 2976 "./include/linux/skbuff.h"
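/*
 * The do { ... "ebreak" ... __builtin_unreachable() ... } block below is
 * BUG_ON(err) after expansion: trap via ebreak, then unreachable.
 */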
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
int err = pskb_trim(skb, len);
do { if (__builtin_expect(!!(err), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (2979), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
}

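/*
 * The recurring ((gfp_t)0x20u | (gfp_t)0x200u | (gfp_t)0x800u) literal
 * is GFP_ATOMIC after expansion, i.e. __GFP_HIGH | __GFP_ATOMIC |
 * __GFP_KSWAPD_RECLAIM in this tree's gfp.h.
 */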
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __skb_grow(struct sk_buff *skb, unsigned int len)
{
unsigned int diff = len - skb->len;

if (skb_tailroom(skb) < diff) {
int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)));
if (ret)
return ret;
}
__skb_set_length(skb, len);
return 0;
}
# 3004 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_orphan(struct sk_buff *skb)
{
if (skb->destructor) {
skb->destructor(skb);
skb->destructor = ((void *)0);
skb->sk = ((void *)0);
} else {
do { if (__builtin_expect(!!(skb->sk), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (3011), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
}
}
# 3024 "./include/linux/skbuff.h"
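/*
 * ((struct skb_shared_info *)(skb_end_pointer(skb))) is the expansion of
 * skb_shinfo(skb) and recurs throughout the rest of this header.
 */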
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
if (__builtin_expect(!!(!skb_zcopy(skb)), 1))
return 0;
if (!skb_zcopy_is_nouarg(skb) &&
((struct ubuf_info *)(((struct skb_shared_info *)(skb_end_pointer(skb)))->destructor_arg))->callback == msg_zerocopy_callback)
return 0;
return skb_copy_ubufs(skb, gfp_mask);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
{
if (__builtin_expect(!!(!skb_zcopy(skb)), 1))
return 0;
return skb_copy_ubufs(skb, gfp_mask);
}
# 3050 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_queue_purge(struct sk_buff_head *list)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(list)) != ((void *)0))
kfree_skb(skb);
}
void skb_queue_purge(struct sk_buff_head *list);

unsigned int skb_rbtree_purge(struct rb_root *root);

void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
# 3069 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *netdev_alloc_frag(unsigned int fragsz)
{
return __netdev_alloc_frag_align(fragsz, ~0u);
}

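/*
 * The statement expression below is WARN_ON(!is_power_of_2(align));
 * passing -align as the mask works because, for a power of two,
 * -align == ~(align - 1).
 */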
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *netdev_alloc_frag_align(unsigned int fragsz,
unsigned int align)
{
({ int __ret_warn_on = !!(!is_power_of_2(align)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (3077), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
return __netdev_alloc_frag_align(fragsz, -align);
}

struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
gfp_t gfp_mask);
# 3097 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *netdev_alloc_skb(struct net_device *dev,
unsigned int length)
{
return __netdev_alloc_skb(dev, length, ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *__dev_alloc_skb(unsigned int length,
gfp_t gfp_mask)
{
return __netdev_alloc_skb(((void *)0), length, gfp_mask);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *dev_alloc_skb(unsigned int length)
{
return netdev_alloc_skb(((void *)0), length);
}


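/*
 * The literal 2 in the two helpers below is NET_IP_ALIGN: two bytes of
 * headroom so the IP header lands naturally aligned after a 14-byte
 * Ethernet header.
 */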
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
unsigned int length, gfp_t gfp)
{
struct sk_buff *skb = __netdev_alloc_skb(dev, length + 2, gfp);

if (2 && skb)
skb_reserve(skb, 2);
return skb;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
unsigned int length)
{
return __netdev_alloc_skb_ip_align(dev, length, ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_free_frag(void *addr)
{
page_frag_free(addr);
}

void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *napi_alloc_frag(unsigned int fragsz)
{
return __napi_alloc_frag_align(fragsz, ~0u);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *napi_alloc_frag_align(unsigned int fragsz,
unsigned int align)
{
({ int __ret_warn_on = !!(!is_power_of_2(align)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (3148), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
return __napi_alloc_frag_align(fragsz, -align);
}

struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
unsigned int length, gfp_t gfp_mask);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
unsigned int length)
{
return __napi_alloc_skb(napi, length, ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)));
}
void napi_consume_skb(struct sk_buff *skb, int budget);

void napi_skb_free_stolen_head(struct sk_buff *skb);
void __kfree_skb_defer(struct sk_buff *skb);
# 3173 "./include/linux/skbuff.h"
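/*
 * 0x40000u | 0x20000u below is __GFP_COMP | __GFP_MEMALLOC, and the
 * extra 0x2000u in dev_alloc_pages() is __GFP_NOWARN on top of
 * GFP_ATOMIC; the -1 passed to alloc_pages_node() is NUMA_NO_NODE.
 */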
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *__dev_alloc_pages(gfp_t gfp_mask,
unsigned int order)
{
# 3184 "./include/linux/skbuff.h"
gfp_mask |= (( gfp_t)0x40000u) | (( gfp_t)0x20000u);

return alloc_pages_node((-1), gfp_mask, order);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *dev_alloc_pages(unsigned int order)
{
return __dev_alloc_pages(((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)) | (( gfp_t)0x2000u), order);
}
# 3202 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *__dev_alloc_page(gfp_t gfp_mask)
{
return __dev_alloc_pages(gfp_mask, 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *dev_alloc_page(void)
{
return dev_alloc_pages(0);
}
# 3222 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dev_page_is_reusable(const struct page *page)
{
return __builtin_expect(!!(page_to_nid(page) == numa_mem_id() && !page_is_pfmemalloc(page)), 1);

}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_propagate_pfmemalloc(const struct page *page,
struct sk_buff *skb)
{
if (page_is_pfmemalloc(page))
skb->pfmemalloc = true;
}





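/*
 * skb_frag_t is a typedef of struct bio_vec, hence the bv_page and
 * bv_offset fields in the accessors below. The runs of blank lines in
 * this region are kernel-doc comments stripped by the preprocessor.
 */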
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int skb_frag_off(const skb_frag_t *frag)
{
return frag->bv_offset;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_frag_off_add(skb_frag_t *frag, int delta)
{
frag->bv_offset += delta;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
{
frag->bv_offset = offset;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_frag_off_copy(skb_frag_t *fragto,
const skb_frag_t *fragfrom)
{
fragto->bv_offset = fragfrom->bv_offset;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page *skb_frag_page(const skb_frag_t *frag)
{
return frag->bv_page;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_frag_ref(skb_frag_t *frag)
{
get_page(skb_frag_page(frag));
}
# 3309 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_frag_ref(struct sk_buff *skb, int f)
{
__skb_frag_ref(&((struct skb_shared_info *)(skb_end_pointer(skb)))->frags[f]);
}
# 3322 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_frag_unref(skb_frag_t *frag, bool recycle)
{
struct page *page = skb_frag_page(frag);


if (recycle && page_pool_return_skb_page(page))
return;

put_page(page);
}
# 3340 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_frag_unref(struct sk_buff *skb, int f)
{
__skb_frag_unref(&((struct skb_shared_info *)(skb_end_pointer(skb)))->frags[f], skb->pp_recycle);
}
# 3352 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *skb_frag_address(const skb_frag_t *frag)
{
return lowmem_page_address(skb_frag_page(frag)) + skb_frag_off(frag);
}
# 3364 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *skb_frag_address_safe(const skb_frag_t *frag)
{
void *ptr = lowmem_page_address(skb_frag_page(frag));
if (__builtin_expect(!!(!ptr), 0))
return ((void *)0);

return ptr + skb_frag_off(frag);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_frag_page_copy(skb_frag_t *fragto,
const skb_frag_t *fragfrom)
{
fragto->bv_page = fragfrom->bv_page;
}
# 3391 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
frag->bv_page = page;
}
# 3404 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_frag_set_page(struct sk_buff *skb, int f,
struct page *page)
{
__skb_frag_set_page(&((struct skb_shared_info *)(skb_end_pointer(skb)))->frags[f], page);
}

bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
# 3423 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) dma_addr_t skb_frag_dma_map(struct device *dev,
const skb_frag_t *frag,
size_t offset, size_t size,
enum dma_data_direction dir)
{
return dma_map_page_attrs(dev, skb_frag_page(frag), skb_frag_off(frag) + offset, size, dir, 0);

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *pskb_copy(struct sk_buff *skb,
gfp_t gfp_mask)
{
return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
gfp_t gfp_mask)
{
return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
}
# 3454 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
return !skb_header_cloned(skb) &&
skb_headroom(skb) + len <= skb->hdr_len;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_try_make_writable(struct sk_buff *skb,
unsigned int write_len)
{
return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
pskb_expand_head(skb, 0, 0, ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)));
}

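/*
 * The unreadable first argument to pskb_expand_head() in __skb_cow()
 * below is ALIGN(delta, NET_SKB_PAD) after expansion, with NET_SKB_PAD =
 * max(32, L1_CACHE_BYTES) and L1_CACHE_BYTES = 1 << 6 here.
 * Reconstructed (not verbatim), the call reads roughly:
 *
 *	return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
 *				GFP_ATOMIC);
 */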
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __skb_cow(struct sk_buff *skb, unsigned int headroom,
int cloned)
{
int delta = 0;

if (headroom > skb_headroom(skb))
delta = headroom - skb_headroom(skb);

if (delta || cloned)
return pskb_expand_head(skb, ((((delta)) + ((typeof((delta)))((__builtin_choose_expr(((!!(sizeof((typeof(32) *)1 == (typeof((1 << 6)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(32) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((1 << 6)) * 0l)) : (int *)8))))), ((32) > ((1 << 6)) ? (32) : ((1 << 6))), ({ typeof(32) __UNIQUE_ID___x281 = (32); typeof((1 << 6)) __UNIQUE_ID___y282 = ((1 << 6)); ((__UNIQUE_ID___x281) > (__UNIQUE_ID___y282) ? (__UNIQUE_ID___x281) : (__UNIQUE_ID___y282)); })))) - 1)) & ~((typeof((delta)))((__builtin_choose_expr(((!!(sizeof((typeof(32) *)1 == (typeof((1 << 6)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(32) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((1 << 6)) * 0l)) : (int *)8))))), ((32) > ((1 << 6)) ? (32) : ((1 << 6))), ({ typeof(32) __UNIQUE_ID___x281 = (32); typeof((1 << 6)) __UNIQUE_ID___y282 = ((1 << 6)); ((__UNIQUE_ID___x281) > (__UNIQUE_ID___y282) ? (__UNIQUE_ID___x281) : (__UNIQUE_ID___y282)); })))) - 1)), 0,
((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)));
return 0;
}
# 3493 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
return __skb_cow(skb, headroom, skb_cloned(skb));
}
# 3508 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
# 3523 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_padto(struct sk_buff *skb, unsigned int len)
{
unsigned int size = skb->len;
if (__builtin_expect(!!(size >= len), 1))
return 0;
return skb_pad(skb, len - size);
}
# 3542 "./include/linux/skbuff.h"
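/*
 * The bare -12 and -14 return values in this region are the expanded
 * errno constants -ENOMEM and -EFAULT.
 */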
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) __skb_put_padto(struct sk_buff *skb,
unsigned int len,
bool free_on_error)
{
unsigned int size = skb->len;

if (__builtin_expect(!!(size < len), 0)) {
len -= size;
if (__skb_pad(skb, len, free_on_error))
return -12;
__skb_put(skb, len);
}
return 0;
}
# 3567 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __attribute__((__warn_unused_result__)) skb_put_padto(struct sk_buff *skb, unsigned int len)
{
return __skb_put_padto(skb, len, true);
}

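/*
 * ip_summed is compared against raw values throughout this header:
 * 0 = CHECKSUM_NONE, 1 = CHECKSUM_UNNECESSARY, 2 = CHECKSUM_COMPLETE,
 * 3 = CHECKSUM_PARTIAL. So the first branch of skb_add_data() below is,
 * pre-expansion, roughly "if (skb->ip_summed == CHECKSUM_NONE)".
 */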
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_add_data(struct sk_buff *skb,
struct iov_iter *from, int copy)
{
const int off = skb->len;

if (skb->ip_summed == 0) {
__wsum csum = 0;
if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
&csum, from)) {
skb->csum = csum_block_add(skb->csum, csum, off);
return 0;
}
} else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
return 0;

__skb_trim(skb, off);
return -14;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_can_coalesce(struct sk_buff *skb, int i,
const struct page *page, int off)
{
if (skb_zcopy(skb))
return false;
if (i) {
const skb_frag_t *frag = &((struct skb_shared_info *)(skb_end_pointer(skb)))->frags[i - 1];

return page == skb_frag_page(frag) &&
off == skb_frag_off(frag) + skb_frag_size(frag);
}
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __skb_linearize(struct sk_buff *skb)
{
return __pskb_pull_tail(skb, skb->data_len) ? 0 : -12;
}
# 3617 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_linearize(struct sk_buff *skb)
{
return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}
# 3629 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_has_shared_frag(const struct sk_buff *skb)
{
return skb_is_nonlinear(skb) &&
((struct skb_shared_info *)(skb_end_pointer(skb)))->flags & SKBFL_SHARED_FRAG;
}
# 3642 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_linearize_cow(struct sk_buff *skb)
{
return skb_is_nonlinear(skb) || skb_cloned(skb) ?
__skb_linearize(skb) : 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
unsigned int off)
{
if (skb->ip_summed == 2)
skb->csum = csum_block_sub(skb->csum,
csum_partial(start, len, 0), off);
else if (skb->ip_summed == 3 &&
skb_checksum_start_offset(skb) < 0)
skb->ip_summed = 0;
}
# 3670 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_postpull_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
if (skb->ip_summed == 2)
skb->csum = wsum_negate(csum_partial(start, len,
wsum_negate(skb->csum)));
else if (skb->ip_summed == 3 &&
skb_checksum_start_offset(skb) < 0)
skb->ip_summed = 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
unsigned int off)
{
if (skb->ip_summed == 2)
skb->csum = csum_block_add(skb->csum,
csum_partial(start, len, 0), off);
}
# 3699 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_postpush_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
__skb_postpush_rcsum(skb, start, len, 0);
}

void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
# 3718 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
{
skb_push(skb, len);
skb_postpush_rcsum(skb, skb->data, len);
return skb->data;
}

int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
# 3736 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
if (__builtin_expect(!!(len >= skb->len), 1))
return 0;
return pskb_trim_rcsum_slow(skb, len);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
if (skb->ip_summed == 2)
skb->ip_summed = 0;
__skb_trim(skb, len);
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
{
if (skb->ip_summed == 2)
skb->ip_summed = 0;
return __skb_grow(skb, len);
}
# 3810 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_has_frag_list(const struct sk_buff *skb)
{
return ((struct skb_shared_info *)(skb_end_pointer(skb)))->frag_list != ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_frag_list_init(struct sk_buff *skb)
{
((struct skb_shared_info *)(skb_end_pointer(skb)))->frag_list = ((void *)0);
}





int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
int *err, long *timeo_p,
const struct sk_buff *skb);
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
struct sk_buff_head *queue,
unsigned int flags,
int *off, int *err,
struct sk_buff **last);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
struct sk_buff_head *queue,
unsigned int flags, int *off, int *err,
struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk,
struct sk_buff_head *sk_queue,
unsigned int flags, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
__poll_t datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
struct iov_iter *to, int size);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
struct msghdr *msg, int size)
{
return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
struct msghdr *msg);
int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
struct iov_iter *to, int len,
struct ahash_request *hash);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_free_datagram_locked(struct sock *sk,
struct sk_buff *skb)
{
__skb_free_datagram_locked(sk, skb, 0);
}
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
int len);
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int len,
unsigned int flags);
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
int len);
int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
int len, int hlen);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
unsigned int offset);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
int skb_eth_pop(struct sk_buff *skb);
int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
const unsigned char *src);
int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
int mac_len, bool ethernet);
int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
bool ethernet);
int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
int skb_mpls_dec_ttl(struct sk_buff *skb);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
gfp_t gfp);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -14;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int memcpy_to_msg(struct msghdr *msg, void *data, int len)
{
return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -14;
}

struct skb_checksum_ops {
__wsum (*update)(const void *mem, int len, __wsum wsum);
__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
};

extern const struct skb_checksum_ops *crc32c_csum_stub;

__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum csum);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void * __attribute__((__warn_unused_result__))
__skb_header_pointer(const struct sk_buff *skb, int offset, int len,
const void *data, int hlen, void *buffer)
{
if (__builtin_expect(!!(hlen - offset >= len), 1))
return (void *)data + offset;

if (!skb || __builtin_expect(!!(skb_copy_bits(skb, offset, buffer, len) < 0), 0))
return ((void *)0);

return buffer;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void * __attribute__((__warn_unused_result__))
skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
return __skb_header_pointer(skb, offset, len, skb->data,
skb_headlen(skb), buffer);
}
# 3957 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_needs_linearize(struct sk_buff *skb,
netdev_features_t features)
{
return skb_is_nonlinear(skb) &&
((skb_has_frag_list(skb) && !(features & ((netdev_features_t)1 << (NETIF_F_FRAGLIST_BIT)))) ||
(((struct skb_shared_info *)(skb_end_pointer(skb)))->nr_frags && !(features & ((netdev_features_t)1 << (NETIF_F_SG_BIT)))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_copy_from_linear_data(const struct sk_buff *skb,
void *to,
const unsigned int len)
{
memcpy(to, skb->data, len);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
const int offset, void *to,
const unsigned int len)
{
memcpy(to, skb->data + offset, len);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_copy_to_linear_data(struct sk_buff *skb,
const void *from,
const unsigned int len)
{
memcpy(skb->data, from, len);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_copy_to_linear_data_offset(struct sk_buff *skb,
const int offset,
const void *from,
const unsigned int len)
{
memcpy(skb->data + offset, from, len);
}

void skb_init(void);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t skb_get_ktime(const struct sk_buff *skb)
{
return skb->tstamp;
}
# 4010 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_get_timestamp(const struct sk_buff *skb,
struct __kernel_old_timeval *stamp)
{
*stamp = ns_to_kernel_old_timeval(skb->tstamp);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_get_new_timestamp(const struct sk_buff *skb,
struct __kernel_sock_timeval *stamp)
{
struct timespec64 ts = ns_to_timespec64((skb->tstamp));

stamp->tv_sec = ts.tv_sec;
stamp->tv_usec = ts.tv_nsec / 1000;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_get_timestampns(const struct sk_buff *skb,
struct __kernel_old_timespec *stamp)
{
struct timespec64 ts = ns_to_timespec64((skb->tstamp));

stamp->tv_sec = ts.tv_sec;
stamp->tv_nsec = ts.tv_nsec;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_get_new_timestampns(const struct sk_buff *skb,
struct __kernel_timespec *stamp)
{
struct timespec64 ts = ns_to_timespec64((skb->tstamp));

stamp->tv_sec = ts.tv_sec;
stamp->tv_nsec = ts.tv_nsec;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __net_timestamp(struct sk_buff *skb)
{
skb->tstamp = ktime_get_real();
skb->mono_delivery_time = 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t net_timedelta(ktime_t t)
{
return ((ktime_get_real()) - (t));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt,
bool mono)
{
skb->tstamp = kt;
skb->mono_delivery_time = kt && mono;
}

extern struct static_key_false netstamp_needed_key;




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_clear_delivery_time(struct sk_buff *skb)
{
if (skb->mono_delivery_time) {
skb->mono_delivery_time = 0;
if (__builtin_expect(!!(({ if (!__builtin_types_compatible_p(typeof(*&(&netstamp_needed_key)->key), struct static_key) && !__builtin_types_compatible_p(typeof(*&(&netstamp_needed_key)->key), struct static_key_true) && !__builtin_types_compatible_p(typeof(*&(&netstamp_needed_key)->key), struct static_key_false)) ____wrong_branch_error(); static_key_count((struct static_key *)&(&netstamp_needed_key)->key) > 0; })), 0))
skb->tstamp = ktime_get_real();
else
skb->tstamp = 0;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_clear_tstamp(struct sk_buff *skb)
{
if (skb->mono_delivery_time)
return;

skb->tstamp = 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t skb_tstamp(const struct sk_buff *skb)
{
if (skb->mono_delivery_time)
return 0;

return skb->tstamp;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond)
{
if (!skb->mono_delivery_time && skb->tstamp)
return skb->tstamp;

if (__builtin_expect(!!(({ if (!__builtin_types_compatible_p(typeof(*&(&netstamp_needed_key)->key), struct static_key) && !__builtin_types_compatible_p(typeof(*&(&netstamp_needed_key)->key), struct static_key_true) && !__builtin_types_compatible_p(typeof(*&(&netstamp_needed_key)->key), struct static_key_false)) ____wrong_branch_error(); static_key_count((struct static_key *)&(&netstamp_needed_key)->key) > 0; })), 0) || cond)
return ktime_get_real();

return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u8 skb_metadata_len(const struct sk_buff *skb)
{
return ((struct skb_shared_info *)(skb_end_pointer(skb)))->meta_len;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *skb_metadata_end(const struct sk_buff *skb)
{
return skb_mac_header(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __skb_metadata_differs(const struct sk_buff *skb_a,
const struct sk_buff *skb_b,
u8 meta_len)
{
const void *a = skb_metadata_end(skb_a);
const void *b = skb_metadata_end(skb_b);
# 4146 "./include/linux/skbuff.h"
return memcmp(a - meta_len, b - meta_len, meta_len);

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_metadata_differs(const struct sk_buff *skb_a,
const struct sk_buff *skb_b)
{
u8 len_a = skb_metadata_len(skb_a);
u8 len_b = skb_metadata_len(skb_b);

if (!(len_a | len_b))
return false;

return len_a != len_b ?
true : __skb_metadata_differs(skb_a, skb_b, len_a);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
{
((struct skb_shared_info *)(skb_end_pointer(skb)))->meta_len = meta_len;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_metadata_clear(struct sk_buff *skb)
{
skb_metadata_set(skb, 0);
}

struct sk_buff *skb_clone_sk(struct sk_buff *skb);
# 4182 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
return false;
}
# 4205 "./include/linux/skbuff.h"
void skb_complete_tx_timestamp(struct sk_buff *skb,
struct skb_shared_hwtstamps *hwtstamps);

void __skb_tstamp_tx(struct sk_buff *orig_skb, const struct sk_buff *ack_skb,
struct skb_shared_hwtstamps *hwtstamps,
struct sock *sk, int tstype);
# 4223 "./include/linux/skbuff.h"
void skb_tstamp_tx(struct sk_buff *orig_skb,
struct skb_shared_hwtstamps *hwtstamps);
# 4238 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_tx_timestamp(struct sk_buff *skb)
{
skb_clone_tx_timestamp(skb);
if (((struct skb_shared_info *)(skb_end_pointer(skb)))->tx_flags & SKBTX_SW_TSTAMP)
skb_tstamp_tx(skb, ((void *)0));
}
# 4252 "./include/linux/skbuff.h"
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
__sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_csum_unnecessary(const struct sk_buff *skb)
{
return ((skb->ip_summed == 1) ||
skb->csum_valid ||
(skb->ip_summed == 3 &&
skb_checksum_start_offset(skb) >= 0));
}
# 4281 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __sum16 skb_checksum_complete(struct sk_buff *skb)
{
return skb_csum_unnecessary(skb) ?
0 : __skb_checksum_complete(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
{
if (skb->ip_summed == 1) {
if (skb->csum_level == 0)
skb->ip_summed = 0;
else
skb->csum_level--;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
if (skb->ip_summed == 1) {
if (skb->csum_level < 3)
skb->csum_level++;
} else if (skb->ip_summed == 0) {
skb->ip_summed = 1;
skb->csum_level = 0;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_reset_checksum_unnecessary(struct sk_buff *skb)
{
if (skb->ip_summed == 1) {
skb->ip_summed = 0;
skb->csum_level = 0;
}
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __skb_checksum_validate_needed(struct sk_buff *skb,
bool zero_okay,
__sum16 check)
{
if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
skb->csum_valid = 1;
__skb_decr_checksum_unnecessary(skb);
return false;
}

return true;
}
# 4345 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_checksum_complete_unset(struct sk_buff *skb)
{
if (skb->ip_summed == 2)
skb->ip_summed = 0;
}
# 4360 "./include/linux/skbuff.h"
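/*
 * The 76 below is CHECKSUM_BREAK: packets no longer than this have their
 * checksum completed immediately instead of being deferred.
 */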
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
bool complete,
__wsum psum)
{
if (skb->ip_summed == 2) {
if (!csum_fold(csum_add(psum, skb->csum))) {
skb->csum_valid = 1;
return 0;
}
}

skb->csum = psum;

if (complete || skb->len <= 76) {
__sum16 csum;

csum = __skb_checksum_complete(skb);
skb->csum_valid = !csum;
return csum;
}

return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
{
return 0;
}
# 4426 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __skb_checksum_convert_check(struct sk_buff *skb)
{
return (skb->ip_summed == 0 && skb->csum_valid);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
{
skb->csum = ~pseudo;
skb->ip_summed = 2;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
u16 start, u16 offset)
{
skb->ip_summed = 3;
skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
skb->csum_offset = offset - start;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_remcsum_process(struct sk_buff *skb, void *ptr,
int start, int offset, bool nopartial)
{
__wsum delta;

if (!nopartial) {
skb_remcsum_adjust_partial(skb, ptr, start, offset);
return;
}

if (__builtin_expect(!!(skb->ip_summed != 2), 0)) {
__skb_checksum_complete(skb);
skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
}

delta = remcsum_adjust(ptr, skb->csum, start, offset);


skb->csum = csum_add(skb->csum, delta);
}

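/*
 * The empty or NULL/0-returning bodies from here down are stubs left
 * where config-gated code (conntrack, secmark, secpath, ...) was
 * compiled out; the blank lines inside them are the removed branches.
 */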
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
{



return ((void *)0);

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long skb_get_nfct(const struct sk_buff *skb)
{



return 0UL;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
{




}
# 4612 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_ext_put(struct sk_buff *skb) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_ext_reset(struct sk_buff *skb) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_ext_del(struct sk_buff *skb, int unused) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_has_extensions(struct sk_buff *skb) { return false; }


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void nf_reset_ct(struct sk_buff *skb)
{




}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void nf_reset_trace(struct sk_buff *skb)
{



}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ipvs_reset(struct sk_buff *skb)
{



}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
bool copy)
{
# 4654 "./include/linux/skbuff.h"
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{



dst->slow_gro = src->slow_gro;
__nf_copy(dst, src, true);
}
# 4676 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_init_secmark(struct sk_buff *skb)
{ }


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int secpath_exists(const struct sk_buff *skb)
{



return 0;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_irq_freeable(const struct sk_buff *skb)
{
return !skb->destructor &&
!secpath_exists(skb) &&
!skb_nfct(skb) &&
!skb->_skb_refdst &&
!skb_has_frag_list(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
skb->queue_mapping = queue_mapping;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
return skb->queue_mapping;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
to->queue_mapping = from->queue_mapping;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
skb->queue_mapping = rx_queue + 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u16 skb_get_rx_queue(const struct sk_buff *skb)
{
return skb->queue_mapping - 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
return skb->queue_mapping != 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
{
skb->dst_pending_confirm = val;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
{
return skb->dst_pending_confirm != 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sec_path *skb_sec_path(const struct sk_buff *skb)
{



return ((void *)0);

}







struct skb_gso_cb {
union {
int mac_offset;
int data_offset;
};
int encap_level;
__wsum csum;
__u16 csum_start;
};



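/*
 * ((struct skb_gso_cb *)((skb)->cb + 32)) is SKB_GSO_CB(skb) with
 * SKB_GSO_CB_OFFSET == 32.
 */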
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
return (skb_mac_header(inner_skb) - inner_skb->head) -
((struct skb_gso_cb *)((inner_skb)->cb + 32))->mac_offset;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
int new_headroom, headroom;
int ret;

headroom = skb_headroom(skb);
ret = pskb_expand_head(skb, extra, 0, ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)));
if (ret)
return ret;

new_headroom = skb_headroom(skb);
((struct skb_gso_cb *)((skb)->cb + 32))->mac_offset += (new_headroom - headroom);
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void gso_reset_checksum(struct sk_buff *skb, __wsum res)
{

if (skb->remcsum_offload)
return;

((struct skb_gso_cb *)((skb)->cb + 32))->csum = res;
((struct skb_gso_cb *)((skb)->cb + 32))->csum_start = skb_checksum_start(skb) - skb->head;
}
# 4807 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
unsigned char *csum_start = skb_transport_header(skb);
int plen = (skb->head + ((struct skb_gso_cb *)((skb)->cb + 32))->csum_start) - csum_start;
__wsum partial = ((struct skb_gso_cb *)((skb)->cb + 32))->csum;

((struct skb_gso_cb *)((skb)->cb + 32))->csum = res;
((struct skb_gso_cb *)((skb)->cb + 32))->csum_start = csum_start - skb->head;

return csum_fold(csum_partial(csum_start, plen, partial));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_is_gso(const struct sk_buff *skb)
{
return ((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_size;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_is_gso_v6(const struct sk_buff *skb)
{
return ((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_type & SKB_GSO_TCPV6;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_is_gso_sctp(const struct sk_buff *skb)
{
return ((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_type & SKB_GSO_SCTP;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_is_gso_tcp(const struct sk_buff *skb)
{
return ((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_gso_reset(struct sk_buff *skb)
{
((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_size = 0;
((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs = 0;
((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_type = 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_increase_gso_size(struct skb_shared_info *shinfo,
u16 increment)
{
if (({ int __ret_warn_on = !!(shinfo->gso_size == 0xFFFF); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (4852), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }))
return;
shinfo->gso_size += increment;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_decrease_gso_size(struct skb_shared_info *shinfo,
u16 decrement)
{
if (({ int __ret_warn_on = !!(shinfo->gso_size == 0xFFFF); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (4860), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }))
return;
shinfo->gso_size -= decrement;
}

void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_warn_if_lro(const struct sk_buff *skb)
{


const struct skb_shared_info *shinfo = ((struct skb_shared_info *)(skb_end_pointer(skb)));

if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
__builtin_expect(!!(shinfo->gso_type == 0), 0)) {
__skb_warn_lro_forwarding(skb);
return true;
}
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_forward_csum(struct sk_buff *skb)
{

if (skb->ip_summed == 2)
skb->ip_summed = 0;
}
# 4896 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_checksum_none_assert(const struct sk_buff *skb)
{



}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
unsigned int transport_len,
__sum16(*skb_chkf)(struct sk_buff *skb));
# 4919 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_head_is_locked(const struct sk_buff *skb)
{
return !skb->head_frag || skb_cloned(skb);
}
# 4933 "./include/linux/skbuff.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __wsum lco_csum(struct sk_buff *skb)
{
unsigned char *csum_start = skb_checksum_start(skb);
unsigned char *l4_hdr = skb_transport_header(skb);
__wsum partial;


partial = ~csum_unfold(*( __sum16 *)(csum_start +
skb->csum_offset));




return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_is_redirected(const struct sk_buff *skb)
{
return skb->redirected;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
{
skb->redirected = 1;





}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_reset_redirect(struct sk_buff *skb)
{
skb->redirected = 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_csum_is_sctp(struct sk_buff *skb)
{
return skb->csum_not_inet;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_set_kcov_handle(struct sk_buff *skb,
const u64 kcov_handle)
{



}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 skb_get_kcov_handle(struct sk_buff *skb)
{



return 0;

}
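/*
 * Annotation: the kcov handle accessors above are empty stubs; the fields and
 * logic they normally touch appear to be compiled out because KCOV coverage
 * instrumentation is disabled in this configuration.
 */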


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_mark_for_recycle(struct sk_buff *skb)
{
skb->pp_recycle = 1;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_pp_recycle(struct sk_buff *skb, void *data)
{
if (!1 || !skb->pp_recycle)
return false;
return page_pool_return_skb_page(((((struct page *)((kernel_map.page_offset - ((((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2) >> 1)) - ((((1UL))) << (((pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39)) - (12) - 1 + (( __builtin_constant_p(sizeof(struct page)) ? ( ((sizeof(struct page)) == 0 || (sizeof(struct page)) == 1) ? 0 : ( __builtin_constant_p((sizeof(struct page)) - 1) ? (((sizeof(struct page)) - 1) < 2 ? 0 : 63 - __builtin_clzll((sizeof(struct page)) - 1)) : (sizeof((sizeof(struct page)) - 1) <= 4) ? __ilog2_u32((sizeof(struct page)) - 1) : __ilog2_u64((sizeof(struct page)) - 1) ) + 1) : __order_base_2(sizeof(struct page)) ))))))) + (((((({ unsigned long _x = (unsigned long)(data); ((_x) >= kernel_map.page_offset && (!1 || (_x) < kernel_map.page_offset + (((((1UL) << (12)) / sizeof(pgd_t)) / 2 * ((1UL) << (pgtable_l5_enabled ? 48 : (pgtable_l4_enabled ? 39 : 30)))) / 2))) ? ((unsigned long)(_x) - kernel_map.va_pa_offset) : ({ unsigned long _y = _x; (0 && _y < kernel_map.virt_addr + 0) ? ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - 0); }); })) >> (12))))))));
}
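/*
 * Annotation: the single huge expression above is the RISC-V expansion of
 * virt_to_page(data): it translates the virtual address through kernel_map's
 * linear/kernel mapping offsets (with runtime sv39/sv48/sv57 checks via
 * pgtable_l4_enabled/pgtable_l5_enabled) and indexes the resulting pfn into
 * the vmemmap-based struct page array before handing the page to
 * page_pool_return_skb_page().
 */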
# 41 "./include/net/net_namespace.h" 2


struct user_namespace;
struct proc_dir_entry;
struct net_device;
struct sock;
struct ctl_table_header;
struct net_generic;
struct uevent_sock;
struct netns_ipvs;
struct bpf_prog;





struct net {



refcount_t passive;


spinlock_t rules_mod_lock;

atomic_t dev_unreg_count;

unsigned int dev_base_seq;
int ifindex;

spinlock_t nsid_lock;
atomic_t fnhe_genid;

struct list_head list;
struct list_head exit_list;





struct llist_node cleanup_list;


struct key_tag *key_domain;

struct user_namespace *user_ns;
struct ucounts *ucounts;
struct idr netns_ids;

struct ns_common ns;
struct ref_tracker_dir refcnt_tracker;

struct list_head dev_base_head;
struct proc_dir_entry *proc_net;
struct proc_dir_entry *proc_net_stat;


struct ctl_table_set sysctls;


struct sock *rtnl;
struct sock *genl_sock;

struct uevent_sock *uevent_sock;

struct hlist_head *dev_name_head;
struct hlist_head *dev_index_head;
struct raw_notifier_head netdev_chain;




u32 hash_mix;

struct net_device *loopback_dev;


struct list_head rules_ops;

struct netns_core core;
struct netns_mib mib;
struct netns_packet packet;
struct netns_unix unx;
struct netns_nexthop nexthop;
struct netns_ipv4 ipv4;

struct netns_ipv6 ipv6;
# 147 "./include/net/net_namespace.h"
struct net_generic *gen;


struct netns_bpf bpf;






u64 net_cookie;
# 177 "./include/net/net_namespace.h"
struct sock *diag_nlsk;



} ;
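/*
 * Annotation: struct net is the root object of a network namespace. The runs
 * of blank lines inside it (and in many structs below) are where the
 * preprocessor dropped members that are compiled out by this kernel config.
 */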


# 1 "./include/linux/seq_file_net.h" 1




# 1 "./include/linux/seq_file.h" 1






# 1 "./include/linux/string_helpers.h" 1





# 1 "./include/linux/ctype.h" 1
# 21 "./include/linux/ctype.h"
extern const unsigned char _ctype[];
# 43 "./include/linux/ctype.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int isdigit(int c)
{
return '0' <= c && c <= '9';
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned char __tolower(unsigned char c)
{
if ((((_ctype[(int)(unsigned char)(c)])&(0x01)) != 0))
c -= 'A'-'a';
return c;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned char __toupper(unsigned char c)
{
if ((((_ctype[(int)(unsigned char)(c)])&(0x02)) != 0))
c -= 'a'-'A';
return c;
}
# 70 "./include/linux/ctype.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) char _tolower(const char c)
{
return c | 0x20;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int isodigit(const char c)
{
return c >= '0' && c <= '7';
}
# 7 "./include/linux/string_helpers.h" 2



struct device;
struct file;
struct task_struct;



enum string_size_units {
STRING_UNITS_10,
STRING_UNITS_2,
};

void string_get_size(u64 size, u64 blk_size, enum string_size_units units,
char *buf, int len);
# 33 "./include/linux/string_helpers.h"
int string_unescape(char *src, char *dst, size_t size, unsigned int flags);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int string_unescape_inplace(char *buf, unsigned int flags)
{
return string_unescape(buf, buf, 0, flags);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int string_unescape_any(char *src, char *dst, size_t size)
{
return string_unescape(src, dst, size, (((((1UL))) << (0)) | ((((1UL))) << (1)) | ((((1UL))) << (2)) | ((((1UL))) << (3))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int string_unescape_any_inplace(char *buf)
{
return string_unescape_any(buf, buf, 0);
}
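/*
 * Annotation: the literal OR of BIT(0)..BIT(3) above is the expansion of
 * UNESCAPE_ANY, i.e. UNESCAPE_SPACE | UNESCAPE_OCTAL | UNESCAPE_HEX |
 * UNESCAPE_SPECIAL from string_helpers.h.
 */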
# 65 "./include/linux/string_helpers.h"
int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
unsigned int flags, const char *only);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int string_escape_mem_any_np(const char *src, size_t isz,
char *dst, size_t osz, const char *only)
{
return string_escape_mem(src, isz, dst, osz, ((((((1UL))) << (0)) | ((((1UL))) << (3)) | ((((1UL))) << (1)) | ((((1UL))) << (2))) | ((((1UL))) << (4))), only);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int string_escape_str(const char *src, char *dst, size_t sz,
unsigned int flags, const char *only)
{
return string_escape_mem(src, strlen(src), dst, sz, flags, only);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int string_escape_str_any_np(const char *src, char *dst,
size_t sz, const char *only)
{
return string_escape_str(src, dst, sz, ((((((1UL))) << (0)) | ((((1UL))) << (3)) | ((((1UL))) << (1)) | ((((1UL))) << (2))) | ((((1UL))) << (4))), only);
}
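/*
 * Annotation: likewise, the flag literals in the two *_any_np helpers above
 * appear to expand ESCAPE_ANY_NP, i.e. ESCAPE_ANY (SPACE|SPECIAL|NULL|OCTAL)
 * plus ESCAPE_NP (escape non-printable characters only).
 */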

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void string_upper(char *dst, const char *src)
{
do {
*dst++ = __toupper(*src);
} while (*src++);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void string_lower(char *dst, const char *src)
{
do {
*dst++ = __tolower(*src);
} while (*src++);
}

char *kstrdup_quotable(const char *src, gfp_t gfp);
char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp);
char *kstrdup_quotable_file(struct file *file, gfp_t gfp);

char **kasprintf_strarray(gfp_t gfp, const char *prefix, size_t n);
void kfree_strarray(char **array, size_t n);

char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *str_yes_no(bool v)
{
return v ? "yes" : "no";
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *str_on_off(bool v)
{
return v ? "on" : "off";
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *str_enable_disable(bool v)
{
return v ? "enable" : "disable";
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *str_enabled_disabled(bool v)
{
return v ? "enabled" : "disabled";
}
# 8 "./include/linux/seq_file.h" 2







struct seq_operations;

struct seq_file {
char *buf;
size_t size;
size_t from;
size_t count;
size_t pad_until;
loff_t index;
loff_t read_pos;
struct mutex lock;
const struct seq_operations *op;
int poll_event;
const struct file *file;
void *private;
};

struct seq_operations {
void * (*start) (struct seq_file *m, loff_t *pos);
void (*stop) (struct seq_file *m, void *v);
void * (*next) (struct seq_file *m, void *v, loff_t *pos);
int (*show) (struct seq_file *m, void *v);
};
# 51 "./include/linux/seq_file.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool seq_has_overflowed(struct seq_file *m)
{
return m->count == m->size;
}
# 64 "./include/linux/seq_file.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) size_t seq_get_buf(struct seq_file *m, char **bufp)
{
do { if (__builtin_expect(!!(m->count > m->size), 0))
        do { do { __asm__ __volatile__ (
                "1:\n\t" "ebreak\n"
                ".pushsection __bug_table,\"aw\"\n\t"
                "2:\n\t"
                ".word" " 1b - 2b" "\n\t"
                ".word" " %0 - 2b" "\n\t"
                ".half" " %1\n\t"
                ".half" " %2" "\n\t"
                ".org 2b + %3\n\t"
                ".popsection"
                : : "i" ("include/linux/seq_file.h"), "i" (66), "i" (0),
                    "i" (sizeof(struct bug_entry))); } while (0);
        do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
if (m->count < m->size)
*bufp = m->buf + m->count;
else
*bufp = ((void *)0);

return m->size - m->count;
}
# 84 "./include/linux/seq_file.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void seq_commit(struct seq_file *m, int num)
{
if (num < 0) {
m->count = m->size;
} else {
do { if (__builtin_expect(!!(m->count + num > m->size), 0))
        do { do { __asm__ __volatile__ (
                "1:\n\t" "ebreak\n"
                ".pushsection __bug_table,\"aw\"\n\t"
                "2:\n\t"
                ".word" " 1b - 2b" "\n\t"
                ".word" " %0 - 2b" "\n\t"
                ".half" " %1\n\t"
                ".half" " %2" "\n\t"
                ".org 2b + %3\n\t"
                ".popsection"
                : : "i" ("include/linux/seq_file.h"), "i" (89), "i" (0),
                    "i" (sizeof(struct bug_entry))); } while (0);
        do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
m->count += num;
}
}
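/*
 * Annotation: the asm blobs in seq_get_buf() and seq_commit() are expanded
 * BUG_ON() checks on the buffer accounting. Passing num < 0 to seq_commit()
 * sets count == size, which seq_has_overflowed() then reports so the caller
 * can retry with a larger buffer.
 */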
# 102 "./include/linux/seq_file.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void seq_setwidth(struct seq_file *m, size_t size)
{
m->pad_until = m->count + size;
}
void seq_pad(struct seq_file *m, char c);

char *mangle_path(char *s, const char *p, const char *esc);
int seq_open(struct file *, const struct seq_operations *);
ssize_t seq_read(struct file *, char *, size_t, loff_t *);
ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter);
loff_t seq_lseek(struct file *, loff_t, int);
int seq_release(struct inode *, struct file *);
int seq_write(struct seq_file *seq, const void *data, size_t len);

__attribute__((__format__(printf, 2, 0)))
void seq_vprintf(struct seq_file *m, const char *fmt, va_list args);
__attribute__((__format__(printf, 2, 3)))
void seq_printf(struct seq_file *m, const char *fmt, ...);
void seq_putc(struct seq_file *m, char c);
void seq_puts(struct seq_file *m, const char *s);
void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter,
unsigned long long num, unsigned int width);
void seq_put_decimal_ull(struct seq_file *m, const char *delimiter,
unsigned long long num);
void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num);
void seq_put_hex_ll(struct seq_file *m, const char *delimiter,
unsigned long long v, unsigned int width);

void seq_escape_mem(struct seq_file *m, const char *src, size_t len,
unsigned int flags, const char *esc);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void seq_escape_str(struct seq_file *m, const char *src,
unsigned int flags, const char *esc)
{
seq_escape_mem(m, src, strlen(src), flags, esc);
}
# 150 "./include/linux/seq_file.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void seq_escape(struct seq_file *m, const char *s, const char *esc)
{
seq_escape_str(m, s, ((((1UL))) << (3)), esc);
}

void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
int rowsize, int groupsize, const void *buf, size_t len,
bool ascii);

int seq_path(struct seq_file *, const struct path *, const char *);
int seq_file_path(struct seq_file *, struct file *, const char *);
int seq_dentry(struct seq_file *, struct dentry *, const char *);
int seq_path_root(struct seq_file *m, const struct path *path,
const struct path *root, const char *esc);

void *single_start(struct seq_file *, loff_t *);
int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
int single_release(struct inode *, struct file *);
void *__seq_open_private(struct file *, const struct seq_operations *, int);
int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);


void seq_bprintf(struct seq_file *m, const char *f, const u32 *binary);
# 223 "./include/linux/seq_file.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct user_namespace *seq_user_ns(struct seq_file *seq)
{

return seq->file->f_cred->user_ns;




}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void seq_show_option(struct seq_file *m, const char *name,
const char *value)
{
seq_putc(m, ',');
seq_escape(m, name, ",= \t\n\\");
if (value) {
seq_putc(m, '=');
seq_escape(m, value, ", \t\n\\");
}
}
# 273 "./include/linux/seq_file.h"
extern struct list_head *seq_list_start(struct list_head *head,
loff_t pos);
extern struct list_head *seq_list_start_head(struct list_head *head,
loff_t pos);
extern struct list_head *seq_list_next(void *v, struct list_head *head,
loff_t *ppos);





extern struct hlist_node *seq_hlist_start(struct hlist_head *head,
loff_t pos);
extern struct hlist_node *seq_hlist_start_head(struct hlist_head *head,
loff_t pos);
extern struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head,
loff_t *ppos);

extern struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head,
loff_t pos);
extern struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head,
loff_t pos);
extern struct hlist_node *seq_hlist_next_rcu(void *v,
struct hlist_head *head,
loff_t *ppos);


extern struct hlist_node *seq_hlist_start_percpu(struct hlist_head *head, int *cpu, loff_t pos);

extern struct hlist_node *seq_hlist_next_percpu(void *v, struct hlist_head *head, int *cpu, loff_t *pos);

void seq_file_init(void);
# 6 "./include/linux/seq_file_net.h" 2

struct net;
extern struct net init_net;

struct seq_net_private {

struct net *net;
netns_tracker ns_tracker;

};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net *seq_file_net(struct seq_file *seq)
{

return ((struct seq_net_private *)seq->private)->net;



}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net *seq_file_single_net(struct seq_file *seq)
{

return (struct net *)seq->private;



}
# 184 "./include/net/net_namespace.h" 2


extern struct net init_net;


struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
struct net *old_net);

void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);

void net_ns_barrier(void);

struct ns_common *get_net_ns(struct ns_common *ns);
struct net *get_net_ns_by_fd(int fd);
# 230 "./include/net/net_namespace.h"
extern struct list_head net_namespace_list;

struct net *get_net_ns_by_pid(pid_t pid);


void ipx_register_sysctl(void);
void ipx_unregister_sysctl(void);






void __put_net(struct net *net);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net *get_net(struct net *net)
{
refcount_inc(&net->ns.count);
return net;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net *maybe_get_net(struct net *net)
{





if (!refcount_inc_not_zero(&net->ns.count))
net = ((void *)0);
return net;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_net(struct net *net)
{
if (refcount_dec_and_test(&net->ns.count))
__put_net(net);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
int net_eq(const struct net *net1, const struct net *net2)
{
return net1 == net2;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int check_net(const struct net *net)
{
return refcount_read(&net->ns.count) != 0;
}
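/*
 * Annotation: namespace lifetime is driven by net->ns.count.
 * maybe_get_net() uses refcount_inc_not_zero() so a dying namespace (count
 * already zero) is never resurrected, while check_net() only observes
 * whether it is still alive.
 */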

void net_drop_ns(void *);
# 315 "./include/net/net_namespace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netns_tracker_alloc(struct net *net,
netns_tracker *tracker, gfp_t gfp)
{



}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netns_tracker_free(struct net *net,
netns_tracker *tracker)
{



}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net *get_net_track(struct net *net,
netns_tracker *tracker, gfp_t gfp)
{
get_net(net);
netns_tracker_alloc(net, tracker, gfp);
return net;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_net_track(struct net *net, netns_tracker *tracker)
{
netns_tracker_free(net, tracker);
put_net(net);
}
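/*
 * Annotation: netns_tracker_alloc()/netns_tracker_free() are no-ops in this
 * configuration (ref-tracker debugging appears to be compiled out), so
 * get_net_track()/put_net_track() reduce to plain get_net()/put_net().
 */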

typedef struct {

struct net *net;

} possible_net_t;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void write_pnet(possible_net_t *pnet, struct net *net)
{

pnet->net = net;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net *read_pnet(const possible_net_t *pnet)
{

return pnet->net;



}
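/*
 * Annotation: possible_net_t exists so the struct net back-pointer can
 * compile away when network namespaces are disabled; this build has them
 * enabled, so write_pnet()/read_pnet() are trivial pointer accessors.
 */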
# 387 "./include/net/net_namespace.h"
int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
int peernet2id(const struct net *net, struct net *peer);
bool peernet_has_id(const struct net *net, struct net *peer);
struct net *get_net_ns_by_id(const struct net *net, int id);

struct pernet_operations {
struct list_head list;
# 416 "./include/net/net_namespace.h"
int (*init)(struct net *net);
void (*pre_exit)(struct net *net);
void (*exit)(struct net *net);
void (*exit_batch)(struct list_head *net_exit_list);
unsigned int *id;
size_t size;
};
# 443 "./include/net/net_namespace.h"
int register_pernet_subsys(struct pernet_operations *);
void unregister_pernet_subsys(struct pernet_operations *);
int register_pernet_device(struct pernet_operations *);
void unregister_pernet_device(struct pernet_operations *);

struct ctl_table;


int net_sysctl_init(void);
struct ctl_table_header *register_net_sysctl(struct net *net, const char *path,
struct ctl_table *table);
void unregister_net_sysctl_table(struct ctl_table_header *header);
# 467 "./include/net/net_namespace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int rt_genid_ipv4(const struct net *net)
{
return atomic_read(&net->ipv4.rt_genid);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int rt_genid_ipv6(const struct net *net)
{
return atomic_read(&net->ipv6.fib6_sernum);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rt_genid_bump_ipv4(struct net *net)
{
atomic_inc(&net->ipv4.rt_genid);
}

extern void (*__fib6_flush_trees)(struct net *net);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rt_genid_bump_ipv6(struct net *net)
{
if (__fib6_flush_trees)
__fib6_flush_trees(net);
}
# 500 "./include/net/net_namespace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rt_genid_bump_all(struct net *net)
{
rt_genid_bump_ipv4(net);
rt_genid_bump_ipv6(net);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int fnhe_genid(const struct net *net)
{
return atomic_read(&net->fnhe_genid);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fnhe_genid_bump(struct net *net)
{
atomic_inc(&net->fnhe_genid);
}


void net_ns_init(void);
# 39 "./include/linux/netdevice.h" 2



# 1 "./include/net/netprio_cgroup.h" 1
# 11 "./include/net/netprio_cgroup.h"
# 1 "./include/linux/cgroup.h" 1
# 16 "./include/linux/cgroup.h"
# 1 "./include/uapi/linux/cgroupstats.h" 1
# 20 "./include/uapi/linux/cgroupstats.h"
# 1 "./include/uapi/linux/taskstats.h" 1
# 41 "./include/uapi/linux/taskstats.h"
struct taskstats {





__u16 version;
__u32 ac_exitcode;




__u8 ac_flag;
__u8 ac_nice;
# 72 "./include/uapi/linux/taskstats.h"
__u64 cpu_count __attribute__((aligned(8)));
__u64 cpu_delay_total;






__u64 blkio_count;
__u64 blkio_delay_total;


__u64 swapin_count;
__u64 swapin_delay_total;







__u64 cpu_run_real_total;







__u64 cpu_run_virtual_total;




char ac_comm[32];
__u8 ac_sched __attribute__((aligned(8)));

__u8 ac_pad[3];
__u32 ac_uid __attribute__((aligned(8)));

__u32 ac_gid;
__u32 ac_pid;
__u32 ac_ppid;

__u32 ac_btime;
__u64 ac_etime __attribute__((aligned(8)));

__u64 ac_utime;
__u64 ac_stime;
__u64 ac_minflt;
__u64 ac_majflt;
# 132 "./include/uapi/linux/taskstats.h"
__u64 coremem;



__u64 virtmem;




__u64 hiwater_rss;
__u64 hiwater_vm;


__u64 read_char;
__u64 write_char;
__u64 read_syscalls;
__u64 write_syscalls;




__u64 read_bytes;
__u64 write_bytes;
__u64 cancelled_write_bytes;

__u64 nvcsw;
__u64 nivcsw;


__u64 ac_utimescaled;
__u64 ac_stimescaled;
__u64 cpu_scaled_run_real_total;


__u64 freepages_count;
__u64 freepages_delay_total;


__u64 thrashing_count;
__u64 thrashing_delay_total;


__u64 ac_btime64;


__u64 compact_count;
__u64 compact_delay_total;
};
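/*
 * Annotation: struct taskstats is the UAPI accounting record delivered over
 * the taskstats netlink interface; the __attribute__((aligned(8))) members
 * pin the 64-bit fields to a stable layout across architectures.
 */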
# 188 "./include/uapi/linux/taskstats.h"
enum {
TASKSTATS_CMD_UNSPEC = 0,
TASKSTATS_CMD_GET,
TASKSTATS_CMD_NEW,
__TASKSTATS_CMD_MAX,
};



enum {
TASKSTATS_TYPE_UNSPEC = 0,
TASKSTATS_TYPE_PID,
TASKSTATS_TYPE_TGID,
TASKSTATS_TYPE_STATS,
TASKSTATS_TYPE_AGGR_PID,
TASKSTATS_TYPE_AGGR_TGID,
TASKSTATS_TYPE_NULL,
__TASKSTATS_TYPE_MAX,
};



enum {
TASKSTATS_CMD_ATTR_UNSPEC = 0,
TASKSTATS_CMD_ATTR_PID,
TASKSTATS_CMD_ATTR_TGID,
TASKSTATS_CMD_ATTR_REGISTER_CPUMASK,
TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK,
__TASKSTATS_CMD_ATTR_MAX,
};
# 21 "./include/uapi/linux/cgroupstats.h" 2
# 32 "./include/uapi/linux/cgroupstats.h"
struct cgroupstats {
__u64 nr_sleeping;
__u64 nr_running;
__u64 nr_stopped;
__u64 nr_uninterruptible;

__u64 nr_io_wait;
};







enum {
CGROUPSTATS_CMD_UNSPEC = __TASKSTATS_CMD_MAX,
CGROUPSTATS_CMD_GET,
CGROUPSTATS_CMD_NEW,
__CGROUPSTATS_CMD_MAX,
};



enum {
CGROUPSTATS_TYPE_UNSPEC = 0,
CGROUPSTATS_TYPE_CGROUP_STATS,
__CGROUPSTATS_TYPE_MAX,
};



enum {
CGROUPSTATS_CMD_ATTR_UNSPEC = 0,
CGROUPSTATS_CMD_ATTR_FD,
__CGROUPSTATS_CMD_ATTR_MAX,
};
# 17 "./include/linux/cgroup.h" 2






# 1 "./include/linux/nsproxy.h" 1







struct mnt_namespace;
struct uts_namespace;
struct ipc_namespace;
struct pid_namespace;
struct cgroup_namespace;
struct fs_struct;
# 31 "./include/linux/nsproxy.h"
struct nsproxy {
atomic_t count;
struct uts_namespace *uts_ns;
struct ipc_namespace *ipc_ns;
struct mnt_namespace *mnt_ns;
struct pid_namespace *pid_ns_for_children;
struct net *net_ns;
struct time_namespace *time_ns;
struct time_namespace *time_ns_for_children;
struct cgroup_namespace *cgroup_ns;
};
extern struct nsproxy init_nsproxy;
# 53 "./include/linux/nsproxy.h"
struct nsset {
unsigned flags;
struct nsproxy *nsproxy;
struct fs_struct *fs;
const struct cred *cred;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct cred *nsset_cred(struct nsset *set)
{
if (set->flags & 0x10000000)
return (struct cred *)set->cred;

return ((void *)0);
}
# 94 "./include/linux/nsproxy.h"
int copy_namespaces(unsigned long flags, struct task_struct *tsk);
void exit_task_namespaces(struct task_struct *tsk);
void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new);
void free_nsproxy(struct nsproxy *ns);
int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **,
struct cred *, struct fs_struct *);
int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) nsproxy_cache_init(void);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_nsproxy(struct nsproxy *ns)
{
if (atomic_dec_and_test(&ns->count)) {
free_nsproxy(ns);
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void get_nsproxy(struct nsproxy *ns)
{
atomic_inc(&ns->count);
}
# 24 "./include/linux/cgroup.h" 2
# 1 "./include/linux/user_namespace.h" 1
# 17 "./include/linux/user_namespace.h"
struct uid_gid_extent {
u32 first;
u32 lower_first;
u32 count;
};

struct uid_gid_map {
u32 nr_extents;
union {
struct uid_gid_extent extent[5];
struct {
struct uid_gid_extent *forward;
struct uid_gid_extent *reverse;
};
};
};





struct ucounts;

enum ucount_type {
UCOUNT_USER_NAMESPACES,
UCOUNT_PID_NAMESPACES,
UCOUNT_UTS_NAMESPACES,
UCOUNT_IPC_NAMESPACES,
UCOUNT_NET_NAMESPACES,
UCOUNT_MNT_NAMESPACES,
UCOUNT_CGROUP_NAMESPACES,
UCOUNT_TIME_NAMESPACES,

UCOUNT_INOTIFY_INSTANCES,
UCOUNT_INOTIFY_WATCHES,





UCOUNT_RLIMIT_NPROC,
UCOUNT_RLIMIT_MSGQUEUE,
UCOUNT_RLIMIT_SIGPENDING,
UCOUNT_RLIMIT_MEMLOCK,
UCOUNT_COUNTS,
};



struct user_namespace {
struct uid_gid_map uid_map;
struct uid_gid_map gid_map;
struct uid_gid_map projid_map;
struct user_namespace *parent;
int level;
kuid_t owner;
kgid_t group;
struct ns_common ns;
unsigned long flags;


bool parent_could_setfcap;







struct list_head keyring_name_list;
struct key *user_keyring_register;
struct rw_semaphore keyring_sem;






struct work_struct work;

struct ctl_table_set set;
struct ctl_table_header *sysctls;

struct ucounts *ucounts;
long ucount_max[UCOUNT_COUNTS];
} ;

struct ucounts {
struct hlist_node node;
struct user_namespace *ns;
kuid_t uid;
atomic_t count;
atomic_long_t ucount[UCOUNT_COUNTS];
};

extern struct user_namespace init_user_ns;
extern struct ucounts init_ucounts;

bool setup_userns_sysctls(struct user_namespace *ns);
void retire_userns_sysctls(struct user_namespace *ns);
struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type);
void dec_ucount(struct ucounts *ucounts, enum ucount_type type);
struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid);
struct ucounts * __attribute__((__warn_unused_result__)) get_ucounts(struct ucounts *ucounts);
void put_ucounts(struct ucounts *ucounts);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long get_ucounts_value(struct ucounts *ucounts, enum ucount_type type)
{
return atomic_long_read(&ucounts->ucount[type]);
}

long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v);
bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v);
long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type);
void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum ucount_type type);
bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_rlimit_ucount_max(struct user_namespace *ns,
enum ucount_type type, unsigned long max)
{
ns->ucount_max[type] = max <= ((long)(~0UL >> 1)) ? max : ((long)(~0UL >> 1));
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct user_namespace *get_user_ns(struct user_namespace *ns)
{
if (ns)
refcount_inc(&ns->ns.count);
return ns;
}

extern int create_user_ns(struct cred *new);
extern int unshare_userns(unsigned long unshare_flags, struct cred **new_cred);
extern void __put_user_ns(struct user_namespace *ns);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_user_ns(struct user_namespace *ns)
{
if (ns && refcount_dec_and_test(&ns->ns.count))
__put_user_ns(ns);
}

struct seq_operations;
extern const struct seq_operations proc_uid_seq_operations;
extern const struct seq_operations proc_gid_seq_operations;
extern const struct seq_operations proc_projid_seq_operations;
extern ssize_t proc_uid_map_write(struct file *, const char *, size_t, loff_t *);
extern ssize_t proc_gid_map_write(struct file *, const char *, size_t, loff_t *);
extern ssize_t proc_projid_map_write(struct file *, const char *, size_t, loff_t *);
extern ssize_t proc_setgroups_write(struct file *, const char *, size_t, loff_t *);
extern int proc_setgroups_show(struct seq_file *m, void *v);
extern bool userns_may_setgroups(const struct user_namespace *ns);
extern bool in_userns(const struct user_namespace *ancestor,
const struct user_namespace *child);
extern bool current_in_userns(const struct user_namespace *target_ns);
struct ns_common *ns_get_owner(struct ns_common *ns);
# 25 "./include/linux/cgroup.h" 2

# 1 "./include/linux/kernel_stat.h" 1
# 20 "./include/linux/kernel_stat.h"
enum cpu_usage_stat {
CPUTIME_USER,
CPUTIME_NICE,
CPUTIME_SYSTEM,
CPUTIME_SOFTIRQ,
CPUTIME_IRQ,
CPUTIME_IDLE,
CPUTIME_IOWAIT,
CPUTIME_STEAL,
CPUTIME_GUEST,
CPUTIME_GUEST_NICE,
NR_STATS,
};

struct kernel_cpustat {
u64 cpustat[NR_STATS];
};

struct kernel_stat {
unsigned long irqs_sum;
unsigned int softirqs[NR_SOFTIRQS];
};

extern __attribute__((section(".data..percpu" ""))) __typeof__(struct kernel_stat) kstat;
extern __attribute__((section(".data..percpu" ""))) __typeof__(struct kernel_cpustat) kernel_cpustat;







extern unsigned long long nr_context_switches(void);

extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
extern void kstat_incr_irq_this_cpu(unsigned int irq);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
({ __this_cpu_preempt_check("add"); do { do { const void *__vpp_verify = (typeof((&(kstat.softirqs[irq])) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(kstat.softirqs[irq])) { case 1: do { *({ do { const void *__vpp_verify = (typeof((&(kstat.softirqs[irq])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(kstat.softirqs[irq]))) *)(&(kstat.softirqs[irq]))); (typeof((typeof(*(&(kstat.softirqs[irq]))) *)(&(kstat.softirqs[irq])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0);break; case 2: do { *({ do { const void *__vpp_verify = (typeof((&(kstat.softirqs[irq])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(kstat.softirqs[irq]))) *)(&(kstat.softirqs[irq]))); (typeof((typeof(*(&(kstat.softirqs[irq]))) *)(&(kstat.softirqs[irq])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0);break; case 4: do { *({ do { const void *__vpp_verify = (typeof((&(kstat.softirqs[irq])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(kstat.softirqs[irq]))) *)(&(kstat.softirqs[irq]))); (typeof((typeof(*(&(kstat.softirqs[irq]))) *)(&(kstat.softirqs[irq])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0);break; case 8: do { *({ do { const void *__vpp_verify = (typeof((&(kstat.softirqs[irq])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(kstat.softirqs[irq]))) *)(&(kstat.softirqs[irq]))); (typeof((typeof(*(&(kstat.softirqs[irq]))) *)(&(kstat.softirqs[irq])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0);break; default: __bad_size_call_parameter();break; } } while (0); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
{
return (*({ do { const void *__vpp_verify = (typeof((&(kstat)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&(kstat)))) *)((&(kstat)))); (typeof((typeof(*((&(kstat)))) *)((&(kstat))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })).softirqs[irq];
}
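/*
 * Annotation: the switch-on-sizeof blob in kstat_incr_softirqs_this_cpu() is
 * the expansion of __this_cpu_inc(): it picks a width-specific increment on
 * the calling CPU's copy of the variable, located by adding
 * __per_cpu_offset[cpu] to the per-CPU variable's address.
 * kstat_softirqs_cpu() above is the matching per_cpu() read for an
 * arbitrary CPU.
 */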




extern unsigned int kstat_irqs_usr(unsigned int irq);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
{
return (*({ do { const void *__vpp_verify = (typeof((&(kstat)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&(kstat)))) *)((&(kstat)))); (typeof((typeof(*((&(kstat)))) *)((&(kstat))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })).irqs_sum;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 kcpustat_field(struct kernel_cpustat *kcpustat,
enum cpu_usage_stat usage, int cpu)
{
return kcpustat->cpustat[usage];
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
{
*dst = (*({ do { const void *__vpp_verify = (typeof((&(kernel_cpustat)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&(kernel_cpustat)))) *)((&(kernel_cpustat)))); (typeof((typeof(*((&(kernel_cpustat)))) *)((&(kernel_cpustat))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }));
}



extern void account_user_time(struct task_struct *, u64);
extern void account_guest_time(struct task_struct *, u64);
extern void account_system_time(struct task_struct *, int, u64);
extern void account_system_index_time(struct task_struct *, u64,
enum cpu_usage_stat);
extern void account_steal_time(u64);
extern void account_idle_time(u64);
extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu);







extern void account_process_tick(struct task_struct *, int user);


extern void account_idle_ticks(unsigned long ticks);
# 27 "./include/linux/cgroup.h" 2

# 1 "./include/linux/cgroup-defs.h" 1
# 22 "./include/linux/cgroup-defs.h"
# 1 "./include/linux/bpf-cgroup-defs.h" 1
# 11 "./include/linux/bpf-cgroup-defs.h"
struct bpf_prog_array;

enum cgroup_bpf_attach_type {
CGROUP_BPF_ATTACH_TYPE_INVALID = -1,
CGROUP_INET_INGRESS = 0,
CGROUP_INET_EGRESS,
CGROUP_INET_SOCK_CREATE,
CGROUP_SOCK_OPS,
CGROUP_DEVICE,
CGROUP_INET4_BIND,
CGROUP_INET6_BIND,
CGROUP_INET4_CONNECT,
CGROUP_INET6_CONNECT,
CGROUP_INET4_POST_BIND,
CGROUP_INET6_POST_BIND,
CGROUP_UDP4_SENDMSG,
CGROUP_UDP6_SENDMSG,
CGROUP_SYSCTL,
CGROUP_UDP4_RECVMSG,
CGROUP_UDP6_RECVMSG,
CGROUP_GETSOCKOPT,
CGROUP_SETSOCKOPT,
CGROUP_INET4_GETPEERNAME,
CGROUP_INET6_GETPEERNAME,
CGROUP_INET4_GETSOCKNAME,
CGROUP_INET6_GETSOCKNAME,
CGROUP_INET_SOCK_RELEASE,
MAX_CGROUP_BPF_ATTACH_TYPE
};

struct cgroup_bpf {

struct bpf_prog_array *effective[MAX_CGROUP_BPF_ATTACH_TYPE];






struct list_head progs[MAX_CGROUP_BPF_ATTACH_TYPE];
u32 flags[MAX_CGROUP_BPF_ATTACH_TYPE];


struct list_head storages;


struct bpf_prog_array *inactive;


struct percpu_ref refcnt;


struct work_struct release_work;
};
# 23 "./include/linux/cgroup-defs.h" 2
# 1 "./include/linux/psi_types.h" 1




# 1 "./include/linux/kthread.h" 1







struct mm_struct;

__attribute__((__format__(printf, 4, 5)))
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
void *data,
int node,
const char namefmt[], ...);
# 31 "./include/linux/kthread.h"
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
void *data,
unsigned int cpu,
const char *namefmt);

void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk);
bool set_kthread_struct(struct task_struct *p);

void kthread_set_per_cpu(struct task_struct *k, int cpu);
bool kthread_is_per_cpu(struct task_struct *k);
# 72 "./include/linux/kthread.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct task_struct *
kthread_run_on_cpu(int (*threadfn)(void *data), void *data,
unsigned int cpu, const char *namefmt)
{
struct task_struct *p;

p = kthread_create_on_cpu(threadfn, data, cpu, namefmt);
if (!IS_ERR(p))
wake_up_process(p);

return p;
}

void free_kthread_struct(struct task_struct *k);
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
bool kthread_should_stop(void);
bool kthread_should_park(void);
bool __kthread_should_park(struct task_struct *k);
bool kthread_freezable_should_stop(bool *was_frozen);
void *kthread_func(struct task_struct *k);
void *kthread_data(struct task_struct *k);
void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
void kthread_exit(long result) __attribute__((__noreturn__));
void kthread_complete_and_exit(struct completion *, long) __attribute__((__noreturn__));

int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
extern int tsk_fork_get_node(struct task_struct *tsk);
# 114 "./include/linux/kthread.h"
struct kthread_work;
typedef void (*kthread_work_func_t)(struct kthread_work *work);
void kthread_delayed_work_timer_fn(struct timer_list *t);

enum {
KTW_FREEZABLE = 1 << 0,
};

struct kthread_worker {
unsigned int flags;
raw_spinlock_t lock;
struct list_head work_list;
struct list_head delayed_work_list;
struct task_struct *task;
struct kthread_work *current_work;
};

struct kthread_work {
struct list_head node;
kthread_work_func_t func;
struct kthread_worker *worker;

int canceling;
};

struct kthread_delayed_work {
struct kthread_work work;
struct timer_list timer;
};
# 162 "./include/linux/kthread.h"
extern void __kthread_init_worker(struct kthread_worker *worker,
const char *name, struct lock_class_key *key);
# 186 "./include/linux/kthread.h"
int kthread_worker_fn(void *worker_ptr);

__attribute__((__format__(printf, 2, 3)))
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...);

__attribute__((__format__(printf, 3, 4))) struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
const char namefmt[], ...);

bool kthread_queue_work(struct kthread_worker *worker,
struct kthread_work *work);

bool kthread_queue_delayed_work(struct kthread_worker *worker,
struct kthread_delayed_work *dwork,
unsigned long delay);

bool kthread_mod_delayed_work(struct kthread_worker *worker,
struct kthread_delayed_work *dwork,
unsigned long delay);

void kthread_flush_work(struct kthread_work *work);
void kthread_flush_worker(struct kthread_worker *worker);

bool kthread_cancel_work_sync(struct kthread_work *work);
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);

void kthread_destroy_worker(struct kthread_worker *worker);

void kthread_use_mm(struct mm_struct *mm);
void kthread_unuse_mm(struct mm_struct *mm);

struct cgroup_subsys_state;





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void kthread_associate_blkcg(struct cgroup_subsys_state *css) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct cgroup_subsys_state *kthread_blkcg(void)
{
return ((void *)0);
}
# 6 "./include/linux/psi_types.h" 2
# 191 "./include/linux/psi_types.h"
struct psi_group { };
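/* Annotation: struct psi_group is empty here because CONFIG_PSI appears to be disabled in this configuration. */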
# 24 "./include/linux/cgroup-defs.h" 2



struct cgroup;
struct cgroup_root;
struct cgroup_subsys;
struct cgroup_taskset;
struct kernfs_node;
struct kernfs_ops;
struct kernfs_open_file;
struct seq_file;
struct poll_table_struct;







enum cgroup_subsys_id {

# 1 "./include/linux/cgroup_subsys.h" 1
# 17 "./include/linux/cgroup_subsys.h"
cpu_cgrp_id,
# 45 "./include/linux/cgroup-defs.h" 2
CGROUP_SUBSYS_COUNT,
};



enum {
CSS_NO_REF = (1 << 0),
CSS_ONLINE = (1 << 1),
CSS_RELEASED = (1 << 2),
CSS_VISIBLE = (1 << 3),
CSS_DYING = (1 << 4),
};


enum {

CGRP_NOTIFY_ON_RELEASE,





CGRP_CPUSET_CLONE_CHILDREN,


CGRP_FREEZE,


CGRP_FROZEN,


CGRP_KILL,
};


enum {
CGRP_ROOT_NOPREFIX = (1 << 1),
CGRP_ROOT_XATTR = (1 << 2),






CGRP_ROOT_NS_DELEGATE = (1 << 3),




CGRP_ROOT_CPUSET_V2_MODE = (1 << 4),




CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 5),




CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 6),
};


enum {
CFTYPE_ONLY_ON_ROOT = (1 << 0),
CFTYPE_NOT_ON_ROOT = (1 << 1),
CFTYPE_NS_DELEGATABLE = (1 << 2),

CFTYPE_NO_PREFIX = (1 << 3),
CFTYPE_WORLD_WRITABLE = (1 << 4),
CFTYPE_DEBUG = (1 << 5),
CFTYPE_PRESSURE = (1 << 6),


__CFTYPE_ONLY_ON_DFL = (1 << 16),
__CFTYPE_NOT_ON_DFL = (1 << 17),
};






struct cgroup_file {

struct kernfs_node *kn;
unsigned long notified_at;
struct timer_list notify_timer;
};
# 142 "./include/linux/cgroup-defs.h"
struct cgroup_subsys_state {

struct cgroup *cgroup;


struct cgroup_subsys *ss;


struct percpu_ref refcnt;


struct list_head sibling;
struct list_head children;


struct list_head rstat_css_node;





int id;

unsigned int flags;







u64 serial_nr;





atomic_t online_cnt;


struct work_struct destroy_work;
struct rcu_work destroy_rwork;





struct cgroup_subsys_state *parent;
};
# 199 "./include/linux/cgroup-defs.h"
struct css_set {





struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];


refcount_t refcount;







struct css_set *dom_cset;


struct cgroup *dfl_cgrp;


int nr_tasks;
# 231 "./include/linux/cgroup-defs.h"
struct list_head tasks;
struct list_head mg_tasks;
struct list_head dying_tasks;


struct list_head task_iters;
# 245 "./include/linux/cgroup-defs.h"
struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];


struct list_head threaded_csets;
struct list_head threaded_csets_node;





struct hlist_node hlist;





struct list_head cgrp_links;





struct list_head mg_preload_node;
struct list_head mg_node;
# 277 "./include/linux/cgroup-defs.h"
struct cgroup *mg_src_cgrp;
struct cgroup *mg_dst_cgrp;
struct css_set *mg_dst_cset;


bool dead;


struct callback_head callback_head;
};

struct cgroup_base_stat {
struct task_cputime cputime;
};
# 312 "./include/linux/cgroup-defs.h"
struct cgroup_rstat_cpu {




struct u64_stats_sync bsync;
struct cgroup_base_stat bstat;





struct cgroup_base_stat last_bstat;
# 337 "./include/linux/cgroup-defs.h"
struct cgroup *updated_children;
struct cgroup *updated_next;
};

struct cgroup_freezer_state {

bool freeze;


int e_freeze;




int nr_frozen_descendants;





int nr_frozen_tasks;
};

struct cgroup {

struct cgroup_subsys_state self;

unsigned long flags;







int level;


int max_depth;
# 388 "./include/linux/cgroup-defs.h"
int nr_descendants;
int nr_dying_descendants;
int max_descendants;
# 403 "./include/linux/cgroup-defs.h"
int nr_populated_csets;
int nr_populated_domain_children;
int nr_populated_threaded_children;

int nr_threaded_children;

struct kernfs_node *kn;
struct cgroup_file procs_file;
struct cgroup_file events_file;
# 420 "./include/linux/cgroup-defs.h"
u16 subtree_control;
u16 subtree_ss_mask;
u16 old_subtree_control;
u16 old_subtree_ss_mask;


struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

struct cgroup_root *root;





struct list_head cset_links;
# 443 "./include/linux/cgroup-defs.h"
struct list_head e_csets[CGROUP_SUBSYS_COUNT];
# 452 "./include/linux/cgroup-defs.h"
struct cgroup *dom_cgrp;
struct cgroup *old_dom_cgrp;


struct cgroup_rstat_cpu *rstat_cpu;
struct list_head rstat_css_list;


struct cgroup_base_stat last_bstat;
struct cgroup_base_stat bstat;
struct prev_cputime prev_cputime;





struct list_head pidlists;
struct mutex pidlist_mutex;


wait_queue_head_t offline_waitq;


struct work_struct release_agent_work;


struct psi_group psi;


struct cgroup_bpf bpf;


atomic_t congestion_count;


struct cgroup_freezer_state freezer;


u64 ancestor_ids[];
};






struct cgroup_root {
struct kernfs_root *kf_root;


unsigned int subsys_mask;


int hierarchy_id;


struct cgroup cgrp;


u64 cgrp_ancestor_id_storage;


atomic_t nr_cgrps;


struct list_head root_list;


unsigned int flags;


char release_agent_path[4096];


char name[64];
};
# 536 "./include/linux/cgroup-defs.h"
struct cftype {





char name[64];
unsigned long private;





size_t max_write_len;


unsigned int flags;







unsigned int file_offset;





struct cgroup_subsys *ss;
struct list_head node;
struct kernfs_ops *kf_ops;

int (*open)(struct kernfs_open_file *of);
void (*release)(struct kernfs_open_file *of);





u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);



s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);


int (*seq_show)(struct seq_file *sf, void *v);


void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
void (*seq_stop)(struct seq_file *sf, void *v);






int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
u64 val);



int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
s64 val);







ssize_t (*write)(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off);

__poll_t (*poll)(struct kernfs_open_file *of,
struct poll_table_struct *pt);


struct lock_class_key lockdep_key;

};





struct cgroup_subsys {
struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
int (*css_online)(struct cgroup_subsys_state *css);
void (*css_offline)(struct cgroup_subsys_state *css);
void (*css_released)(struct cgroup_subsys_state *css);
void (*css_free)(struct cgroup_subsys_state *css);
void (*css_reset)(struct cgroup_subsys_state *css);
void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
int (*css_extra_stat_show)(struct seq_file *seq,
struct cgroup_subsys_state *css);

int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
void (*post_attach)(void);
int (*can_fork)(struct task_struct *task,
struct css_set *cset);
void (*cancel_fork)(struct task_struct *task, struct css_set *cset);
void (*fork)(struct task_struct *task);
void (*exit)(struct task_struct *task);
void (*release)(struct task_struct *task);
void (*bind)(struct cgroup_subsys_state *root_css);

bool early_init:1;
# 661 "./include/linux/cgroup-defs.h"
bool implicit_on_dfl:1;
# 673 "./include/linux/cgroup-defs.h"
bool threaded:1;


int id;
const char *name;


const char *legacy_name;


struct cgroup_root *root;


struct idr css_idr;





struct list_head cfts;





struct cftype *dfl_cftypes;
struct cftype *legacy_cftypes;
# 708 "./include/linux/cgroup-defs.h"
unsigned int depends_on;
};

extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
# 720 "./include/linux/cgroup-defs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
percpu_down_read(&cgroup_threadgroup_rwsem);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
percpu_up_read(&cgroup_threadgroup_rwsem);
}
# 761 "./include/linux/cgroup-defs.h"
struct sock_cgroup_data {
struct cgroup *cgroup;






};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{



return 1;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{



return 0;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
u16 prioidx)
{



}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
u32 classid)
{



}
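/*
 * Annotation: the sock_cgroup_data prioidx/classid accessors return fixed
 * defaults and the setters do nothing, which suggests the cgroup net_prio
 * and net_cls controllers are disabled in this configuration.
 */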
# 29 "./include/linux/cgroup.h" 2

struct kernel_clone_args;
# 52 "./include/linux/cgroup.h"
struct css_task_iter {
struct cgroup_subsys *ss;
unsigned int flags;

struct list_head *cset_pos;
struct list_head *cset_head;

struct list_head *tcset_pos;
struct list_head *tcset_head;

struct list_head *task_pos;

struct list_head *cur_tasks_head;
struct css_set *cur_cset;
struct css_set *cur_dcset;
struct task_struct *cur_task;
struct list_head iters_node;
};

extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;



# 1 "./include/linux/cgroup_subsys.h" 1
# 17 "./include/linux/cgroup_subsys.h"
extern struct cgroup_subsys cpu_cgrp_subsys;
# 76 "./include/linux/cgroup.h" 2





# 1 "./include/linux/cgroup_subsys.h" 1
# 17 "./include/linux/cgroup_subsys.h"
extern struct static_key_true cpu_cgrp_subsys_enabled_key; extern struct static_key_true cpu_cgrp_subsys_on_dfl_key;
# 82 "./include/linux/cgroup.h" 2
# 98 "./include/linux/cgroup.h"
bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p,
struct kernel_clone_args *kargs);
extern void cgroup_cancel_fork(struct task_struct *p,
struct kernel_clone_args *kargs);
extern void cgroup_post_fork(struct task_struct *p,
struct kernel_clone_args *kargs);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);





struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
# 310 "./include/linux/cgroup.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 cgroup_id(const struct cgroup *cgrp)
{
return cgrp->kn->id;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void css_get(struct cgroup_subsys_state *css)
{
if (!(css->flags & CSS_NO_REF))
percpu_ref_get(&css->refcnt);
}
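/*
 * Annotation: css_get()/css_put() and the tryget variants below skip all
 * reference counting for csses flagged CSS_NO_REF (such as the root css),
 * since those are never freed through the refcount path.
 */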
# 334 "./include/linux/cgroup.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
if (!(css->flags & CSS_NO_REF))
percpu_ref_get_many(&css->refcnt, n);
}
# 351 "./include/linux/cgroup.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool css_tryget(struct cgroup_subsys_state *css)
{
if (!(css->flags & CSS_NO_REF))
return percpu_ref_tryget(&css->refcnt);
return true;
}
# 368 "./include/linux/cgroup.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool css_tryget_online(struct cgroup_subsys_state *css)
{
if (!(css->flags & CSS_NO_REF))
return percpu_ref_tryget_live(&css->refcnt);
return true;
}
# 390 "./include/linux/cgroup.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool css_is_dying(struct cgroup_subsys_state *css)
{
return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void css_put(struct cgroup_subsys_state *css)
{
if (!(css->flags & CSS_NO_REF))
percpu_ref_put(&css->refcnt);
}
# 414 "./include/linux/cgroup.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
if (!(css->flags & CSS_NO_REF))
percpu_ref_put_many(&css->refcnt, n);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cgroup_get(struct cgroup *cgrp)
{
css_get(&cgrp->self);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cgroup_tryget(struct cgroup *cgrp)
{
return css_tryget(&cgrp->self);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cgroup_put(struct cgroup *cgrp)
{
css_put(&cgrp->self);
}
# 480 "./include/linux/cgroup.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct css_set *task_css_set(struct task_struct *task)
{
return ({ typeof(*((task)->cgroups)) *__UNIQUE_ID_rcu283 = (typeof(*((task)->cgroups)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_284(void) ; if (!((sizeof(((task)->cgroups)) == sizeof(char) || sizeof(((task)->cgroups)) == sizeof(short) || sizeof(((task)->cgroups)) == sizeof(int) || sizeof(((task)->cgroups)) == sizeof(long)) || sizeof(((task)->cgroups)) == sizeof(long long))) __compiletime_assert_284(); } while (0); (*(const volatile typeof( _Generic((((task)->cgroups)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (((task)->cgroups)))) *)&(((task)->cgroups))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*((task)->cgroups)) *)(__UNIQUE_ID_rcu283)); });
}
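/*
 * The one-line body of task_css_set() above is the expansion of
 * rcu_dereference(task->cgroups): a __compiletime_assert that the
 * object is a machine-word size READ_ONCE() can handle, the volatile
 * load itself via _Generic, and a compiled-out lockdep check that
 * rcu_read_lock() is held.
 */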
# 492 "./include/linux/cgroup.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct cgroup_subsys_state *task_css(struct task_struct *task,
int subsys_id)
{
return ({ typeof(*(((task))->cgroups)) *__UNIQUE_ID_rcu285 = (typeof(*(((task))->cgroups)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_286(void) ; if (!((sizeof((((task))->cgroups)) == sizeof(char) || sizeof((((task))->cgroups)) == sizeof(short) || sizeof((((task))->cgroups)) == sizeof(int) || sizeof((((task))->cgroups)) == sizeof(long)) || sizeof((((task))->cgroups)) == sizeof(long long))) __compiletime_assert_286(); } while (0); (*(const volatile typeof( _Generic(((((task))->cgroups)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((((task))->cgroups)))) *)&((((task))->cgroups))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(((task))->cgroups)) *)(__UNIQUE_ID_rcu285)); })->subsys[(subsys_id)];
}
# 507 "./include/linux/cgroup.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
struct cgroup_subsys_state *css;

rcu_read_lock();
while (true) {
css = task_css(task, subsys_id);






if (__builtin_expect(!!(css_tryget(css)), 1))
break;
cpu_relax();
}
rcu_read_unlock();
return css;
}
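/*
 * task_get_css() is a retry loop: under rcu_read_lock() the css
 * returned by task_css() may already be draining its refcount, in
 * which case css_tryget() fails and the loop spins (cpu_relax())
 * until it observes a css whose reference it can safely take.
 */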
# 537 "./include/linux/cgroup.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool task_css_is_root(struct task_struct *task, int subsys_id)
{
return ({ typeof(*(((task))->cgroups)) *__UNIQUE_ID_rcu287 = (typeof(*(((task))->cgroups)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_288(void) ; if (!((sizeof((((task))->cgroups)) == sizeof(char) || sizeof((((task))->cgroups)) == sizeof(short) || sizeof((((task))->cgroups)) == sizeof(int) || sizeof((((task))->cgroups)) == sizeof(long)) || sizeof((((task))->cgroups)) == sizeof(long long))) __compiletime_assert_288(); } while (0); (*(const volatile typeof( _Generic(((((task))->cgroups)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((((task))->cgroups)))) *)&((((task))->cgroups))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(((task))->cgroups)) *)(__UNIQUE_ID_rcu287)); })->subsys[(subsys_id)] ==
init_css_set.subsys[subsys_id];
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct cgroup *task_cgroup(struct task_struct *task,
int subsys_id)
{
return task_css(task, subsys_id)->cgroup;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
return task_css_set(task)->dfl_cgrp;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
struct cgroup_subsys_state *parent_css = cgrp->self.parent;

if (parent_css)
return ({ void *__mptr = (void *)(parent_css); _Static_assert(__builtin_types_compatible_p(typeof(*(parent_css)), typeof(((struct cgroup *)0)->self)) || __builtin_types_compatible_p(typeof(*(parent_css)), typeof(void)), "pointer type mismatch in container_of()"); ((struct cgroup *)(__mptr - __builtin_offsetof(struct cgroup, self))); });
return ((void *)0);
}
# 572 "./include/linux/cgroup.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cgroup_is_descendant(struct cgroup *cgrp,
struct cgroup *ancestor)
{
if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
return false;
return cgrp->ancestor_ids[ancestor->level] == cgroup_id(ancestor);
}
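/*
 * cgroup_is_descendant(): on the same hierarchy, cgrp descends from
 * ancestor iff ancestor's id appears at index ancestor->level in
 * cgrp's ancestor_ids[] path. E.g. with ancestor at level 1 and cgrp
 * at level 3, only cgrp->ancestor_ids[1] is compared.
 */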
# 591 "./include/linux/cgroup.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
int ancestor_level)
{
if (cgrp->level < ancestor_level)
return ((void *)0);
while (cgrp && cgrp->level > ancestor_level)
cgrp = cgroup_parent(cgrp);
return cgrp;
}
# 610 "./include/linux/cgroup.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool task_under_cgroup_hierarchy(struct task_struct *task,
struct cgroup *ancestor)
{
struct css_set *cset = task_css_set(task);

return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cgroup_is_populated(struct cgroup *cgrp)
{
return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
cgrp->nr_populated_threaded_children;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ino_t cgroup_ino(struct cgroup *cgrp)
{
return kernfs_ino(cgrp->kn);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct cftype *of_cft(struct kernfs_open_file *of)
{
return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct cftype *seq_cft(struct seq_file *seq)
{
return of_cft(seq->private);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
return of_css(seq->private);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
return kernfs_name(cgrp->kn, buf, buflen);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
return kernfs_path(cgrp->kn, buf, buflen);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pr_cont_cgroup_name(struct cgroup *cgrp)
{
pr_cont_kernfs_name(cgrp->kn);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pr_cont_cgroup_path(struct cgroup *cgrp)
{
pr_cont_kernfs_path(cgrp->kn);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
return &cgrp->psi;
}

bool cgroup_psi_enabled(void);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cgroup_init_kthreadd(void)
{





get_current()->no_cgroup_migration = 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cgroup_kthread_ready(void)
{




get_current()->no_cgroup_migration = 0;
}

void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
struct cgroup *cgroup_get_from_id(u64 id);
# 766 "./include/linux/cgroup.h"
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);
# 779 "./include/linux/cgroup.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpuacct_account_field(struct task_struct *tsk, int index,
u64 val) {}


void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
enum cpu_usage_stat index, u64 delta_exec);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cgroup_account_cputime(struct task_struct *task,
u64 delta_exec)
{
struct cgroup *cgrp;

cpuacct_charge(task, delta_exec);

cgrp = task_dfl_cgroup(task);
if (cgroup_parent(cgrp))
__cgroup_account_cputime(cgrp, delta_exec);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cgroup_account_cputime_field(struct task_struct *task,
enum cpu_usage_stat index,
u64 delta_exec)
{
struct cgroup *cgrp;

cpuacct_account_field(task, index, delta_exec);

cgrp = task_dfl_cgroup(task);
if (cgroup_parent(cgrp))
__cgroup_account_cputime_field(cgrp, index, delta_exec);
}
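/*
 * Both accounting helpers charge cpuacct first (cpuacct_charge() and
 * cpuacct_account_field() are no-op stubs a few lines above in this
 * configuration) and then feed rstat only when the task's
 * default-hierarchy cgroup has a parent, i.e. time spent in the root
 * cgroup is not pushed through __cgroup_account_cputime*().
 */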
# 829 "./include/linux/cgroup.h"
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
return skcd->cgroup;
}
# 846 "./include/linux/cgroup.h"
struct cgroup_namespace {
struct ns_common ns;
struct user_namespace *user_ns;
struct ucounts *ucounts;
struct css_set *root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;



void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
struct user_namespace *user_ns,
struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
struct cgroup_namespace *ns);
# 878 "./include/linux/cgroup.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void get_cgroup_ns(struct cgroup_namespace *ns)
{
if (ns)
refcount_inc(&ns->ns.count);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_cgroup_ns(struct cgroup_namespace *ns)
{
if (ns && refcount_dec_and_test(&ns->ns.count))
free_cgroup_ns(ns);
}



void cgroup_enter_frozen(void);
void cgroup_leave_frozen(bool always_leave);
void cgroup_update_frozen(struct cgroup *cgrp);
void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
struct cgroup *dst);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cgroup_task_frozen(struct task_struct *task)
{
return task->frozen;
}
# 916 "./include/linux/cgroup.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cgroup_bpf_get(struct cgroup *cgrp)
{
percpu_ref_get(&cgrp->bpf.refcnt);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cgroup_bpf_put(struct cgroup *cgrp)
{
percpu_ref_put(&cgrp->bpf.refcnt);
}
# 12 "./include/net/netprio_cgroup.h" 2
# 44 "./include/net/netprio_cgroup.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 task_netprioidx(struct task_struct *p)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_update_netprioidx(struct sock_cgroup_data *skcd)
{
}
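/*
 * task_netprioidx()/sock_update_netprioidx() are the return-0/no-op
 * stubs from net/netprio_cgroup.h, which suggests CONFIG_CGROUP_NET_PRIO
 * is not set in the riscv defconfig used for this build.
 */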
# 43 "./include/linux/netdevice.h" 2
# 1 "./include/net/xdp.h" 1
# 38 "./include/net/xdp.h"
enum xdp_mem_type {
MEM_TYPE_PAGE_SHARED = 0,
MEM_TYPE_PAGE_ORDER0,
MEM_TYPE_PAGE_POOL,
MEM_TYPE_XSK_BUFF_POOL,
MEM_TYPE_MAX,
};





struct xdp_mem_info {
u32 type;
u32 id;
};

struct page_pool;

struct xdp_rxq_info {
struct net_device *dev;
u32 queue_index;
u32 reg_state;
struct xdp_mem_info mem;
unsigned int napi_id;
u32 frag_size;
} __attribute__((__aligned__((1 << 6))));

struct xdp_txq_info {
struct net_device *dev;
};

enum xdp_buff_flags {
XDP_FLAGS_HAS_FRAGS = ((((1UL))) << (0)),
XDP_FLAGS_FRAGS_PF_MEMALLOC = ((((1UL))) << (1)),


};

struct xdp_buff {
void *data;
void *data_end;
void *data_meta;
void *data_hard_start;
struct xdp_rxq_info *rxq;
struct xdp_txq_info *txq;
u32 frame_sz;
u32 flags;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool xdp_buff_has_frags(struct xdp_buff *xdp)
{
return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void xdp_buff_set_frags_flag(struct xdp_buff *xdp)
{
xdp->flags |= XDP_FLAGS_HAS_FRAGS;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
{
xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool xdp_buff_is_frag_pfmemalloc(struct xdp_buff *xdp)
{
return !!(xdp->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp)
{
xdp->flags |= XDP_FLAGS_FRAGS_PF_MEMALLOC;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
{
xdp->frame_sz = frame_sz;
xdp->rxq = rxq;
xdp->flags = 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
int headroom, int data_len, const bool meta_valid)
{
unsigned char *data = hard_start + headroom;

xdp->data_hard_start = hard_start;
xdp->data = data;
xdp->data_end = data + data_len;
xdp->data_meta = meta_valid ? data : data + 1;
}
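/*
 * xdp_prepare_buff(): when the driver supplies no metadata, data_meta
 * is parked at data + 1. data_meta > data is the "invalid" encoding
 * that xdp_set_data_meta_invalid() and xdp_data_meta_unsupported()
 * further down test for, so a metadata length (data - data_meta) can
 * never look valid by accident.
 */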
# 143 "./include/net/xdp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct skb_shared_info *
xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
{
return (struct skb_shared_info *)((xdp)->data_hard_start + (xdp)->frame_sz - ((((sizeof(struct skb_shared_info))) + ((typeof((sizeof(struct skb_shared_info))))(((1 << 6))) - 1)) & ~((typeof((sizeof(struct skb_shared_info))))(((1 << 6))) - 1)));
}
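/*
 * The expression above is ALIGN(sizeof(struct skb_shared_info), 1 << 6)
 * subtracted from the end of the frame: the shared info lives in the
 * reserved tailroom, aligned to what is presumably the 64-byte
 * SMP_CACHE_BYTES of this configuration.
 */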

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned int xdp_get_buff_len(struct xdp_buff *xdp)
{
unsigned int len = xdp->data_end - xdp->data;
struct skb_shared_info *sinfo;

if (__builtin_expect(!!(!xdp_buff_has_frags(xdp)), 1))
goto out;

sinfo = xdp_get_shared_info_from_buff(xdp);
len += sinfo->xdp_frags_size;
out:
return len;
}

struct xdp_frame {
void *data;
u16 len;
u16 headroom;
u32 metasize:8;
u32 frame_sz:24;



struct xdp_mem_info mem;
struct net_device *dev_rx;
u32 flags;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool xdp_frame_has_frags(struct xdp_frame *frame)
{
return !!(frame->flags & XDP_FLAGS_HAS_FRAGS);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool xdp_frame_is_frag_pfmemalloc(struct xdp_frame *frame)
{
return !!(frame->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
}


struct xdp_frame_bulk {
int count;
void *xa;
void *q[16];
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void xdp_frame_bulk_init(struct xdp_frame_bulk *bq)
{

bq->xa = ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct skb_shared_info *
xdp_get_shared_info_from_frame(struct xdp_frame *frame)
{
void *data_hard_start = frame->data - frame->headroom - sizeof(*frame);

return (struct skb_shared_info *)(data_hard_start + frame->frame_sz -
((((sizeof(struct skb_shared_info))) + ((typeof((sizeof(struct skb_shared_info))))(((1 << 6))) - 1)) & ~((typeof((sizeof(struct skb_shared_info))))(((1 << 6))) - 1)));
}

struct xdp_cpumap_stats {
unsigned int redirect;
unsigned int pass;
unsigned int drop;
};


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xdp_scrub_frame(struct xdp_frame *frame)
{
frame->data = ((void *)0);
frame->dev_rx = ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags,
unsigned int size, unsigned int truesize,
bool pfmemalloc)
{
((struct skb_shared_info *)(skb_end_pointer(skb)))->nr_frags = nr_frags;

skb->len += size;
skb->data_len += size;
skb->truesize += truesize;
skb->pfmemalloc |= pfmemalloc;
}


void xdp_warn(const char *msg, const char *func, const int line);


struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
struct sk_buff *skb,
struct net_device *dev);
struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
struct net_device *dev);
int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp);
struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
{
xdp->data_hard_start = frame->data - frame->headroom - sizeof(*frame);
xdp->data = frame->data;
xdp->data_end = frame->data + frame->len;
xdp->data_meta = frame->data - frame->metasize;
xdp->frame_sz = frame->frame_sz;
xdp->flags = frame->flags;
}
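/*
 * xdp_convert_frame_to_buff() is the inverse of
 * xdp_update_frame_from_buff() below: the struct xdp_frame itself sits
 * in the buffer headroom, so data_hard_start is recovered by stepping
 * back over the headroom plus sizeof(*frame).
 */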

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
int xdp_update_frame_from_buff(struct xdp_buff *xdp,
struct xdp_frame *xdp_frame)
{
int metasize, headroom;


headroom = xdp->data - xdp->data_hard_start;
metasize = xdp->data - xdp->data_meta;
metasize = metasize > 0 ? metasize : 0;
if (__builtin_expect(!!((headroom - metasize) < sizeof(*xdp_frame)), 0))
return -28;


if (__builtin_expect(!!(xdp->data_end > ((xdp)->data_hard_start + (xdp)->frame_sz - ((((sizeof(struct skb_shared_info))) + ((typeof((sizeof(struct skb_shared_info))))(((1 << 6))) - 1)) & ~((typeof((sizeof(struct skb_shared_info))))(((1 << 6))) - 1)))), 0)) {
xdp_warn("Driver BUG: missing reserved tailroom", __func__, 274);
return -28;
}

xdp_frame->data = xdp->data;
xdp_frame->len = xdp->data_end - xdp->data;
xdp_frame->headroom = headroom - sizeof(*xdp_frame);
xdp_frame->metasize = metasize;
xdp_frame->frame_sz = xdp->frame_sz;
xdp_frame->flags = xdp->flags;

return 0;
}
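/*
 * In xdp_update_frame_from_buff() the bare -28 is the preprocessed
 * -ENOSPC, and 274 is the literal expansion of __LINE__ at the
 * xdp_warn() call site in the original net/xdp.h (__func__ survives
 * because it is a predefined identifier, not a macro). The headroom
 * check guarantees the xdp_frame struct fits in front of xdp->data
 * before it is written there.
 */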


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
{
struct xdp_frame *xdp_frame;

if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
return xdp_convert_zc_to_xdp_frame(xdp);


xdp_frame = xdp->data_hard_start;
if (__builtin_expect(!!(xdp_update_frame_from_buff(xdp, xdp_frame) < 0), 0))
return ((void *)0);


xdp_frame->mem = xdp->rxq->mem;

return xdp_frame;
}
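/*
 * xdp_convert_buff_to_frame(): XSK (zero-copy) buffers must be copied
 * out via xdp_convert_zc_to_xdp_frame(); for everything else the frame
 * metadata is written in place at data_hard_start, and mem info is
 * copied last, only after the size checks in
 * xdp_update_frame_from_buff() have passed.
 */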

void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
struct xdp_buff *xdp);
void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
struct xdp_frame_bulk *bq);






void __xdp_release_frame(void *data, struct xdp_mem_info *mem);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xdp_release_frame(struct xdp_frame *xdpf)
{
struct xdp_mem_info *mem = &xdpf->mem;
struct skb_shared_info *sinfo;
int i;


if (mem->type != MEM_TYPE_PAGE_POOL)
return;

if (__builtin_expect(!!(!xdp_frame_has_frags(xdpf)), 1))
goto out;

sinfo = xdp_get_shared_info_from_frame(xdpf);
for (i = 0; i < sinfo->nr_frags; i++) {
struct page *page = skb_frag_page(&sinfo->frags[i]);

__xdp_release_frame(lowmem_page_address(page), mem);
}
out:
__xdp_release_frame(xdpf->data, mem);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) unsigned int xdp_get_frame_len(struct xdp_frame *xdpf)
{
struct skb_shared_info *sinfo;
unsigned int len = xdpf->len;

if (__builtin_expect(!!(!xdp_frame_has_frags(xdpf)), 1))
goto out;

sinfo = xdp_get_shared_info_from_frame(xdpf);
len += sinfo->xdp_frags_size;
out:
return len;
}

int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
struct net_device *dev, u32 queue_index,
unsigned int napi_id, u32 frag_size);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
struct net_device *dev, u32 queue_index,
unsigned int napi_id)
{
return __xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id, 0);
}

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
enum xdp_mem_type type, void *allocator);
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq);
int xdp_reg_mem_model(struct xdp_mem_info *mem,
enum xdp_mem_type type, void *allocator);
void xdp_unreg_mem_model(struct xdp_mem_info *mem);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
xdp_set_data_meta_invalid(struct xdp_buff *xdp)
{
xdp->data_meta = xdp->data + 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
xdp_data_meta_unsupported(const struct xdp_buff *xdp)
{
return __builtin_expect(!!(xdp->data_meta > xdp->data), 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xdp_metalen_invalid(unsigned long metalen)
{
return (metalen & (sizeof(__u32) - 1)) || (metalen > 32);
}

struct xdp_attachment_info {
struct bpf_prog *prog;
u32 flags;
};

struct netdev_bpf;
void xdp_attachment_setup(struct xdp_attachment_info *info,
struct netdev_bpf *bpf);
# 44 "./include/linux/netdevice.h" 2


# 1 "./include/uapi/linux/neighbour.h" 1





# 1 "./include/linux/netlink.h" 1








# 1 "./include/net/scm.h" 1







# 1 "./include/linux/security.h" 1
# 26 "./include/linux/security.h"
# 1 "./include/linux/kernel_read_file.h" 1




# 1 "./include/linux/file.h" 1
# 14 "./include/linux/file.h"
struct file;

extern void fput(struct file *);
extern void fput_many(struct file *, unsigned int);

struct file_operations;
struct task_struct;
struct vfsmount;
struct dentry;
struct inode;
struct path;
extern struct file *alloc_file_pseudo(struct inode *, struct vfsmount *,
const char *, int flags, const struct file_operations *);
extern struct file *alloc_file_clone(struct file *, int flags,
const struct file_operations *);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fput_light(struct file *file, int fput_needed)
{
if (fput_needed)
fput(file);
}

struct fd {
struct file *file;
unsigned int flags;
};



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fdput(struct fd fd)
{
if (fd.flags & 1)
fput(fd.file);
}

extern struct file *fget(unsigned int fd);
extern struct file *fget_many(unsigned int fd, unsigned int refs);
extern struct file *fget_raw(unsigned int fd);
extern struct file *fget_task(struct task_struct *task, unsigned int fd);
extern unsigned long __fdget(unsigned int fd);
extern unsigned long __fdget_raw(unsigned int fd);
extern unsigned long __fdget_pos(unsigned int fd);
extern void __f_unlock_pos(struct file *);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct fd __to_fd(unsigned long v)
{
return (struct fd){(struct file *)(v & ~3),v & 3};
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct fd fdget(unsigned int fd)
{
return __to_fd(__fdget(fd));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct fd fdget_raw(unsigned int fd)
{
return __to_fd(__fdget_raw(fd));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct fd fdget_pos(int fd)
{
return __to_fd(__fdget_pos(fd));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fdput_pos(struct fd f)
{
if (f.flags & 2)
__f_unlock_pos(f.file);
fdput(f);
}
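/*
 * struct fd packs two flag bits into the low bits of the struct file
 * pointer (__to_fd() splits v & ~3 from v & 3), which works because
 * struct file is more than 4-byte aligned. Bit 0 means "fput needed"
 * (tested by fdput()) and bit 1 "f_pos lock held" (tested by
 * fdput_pos()); in the unexpanded header these are presumably the
 * usual FDPUT_FPUT / FDPUT_POS_UNLOCK. Typical caller pattern,
 * sketched:
 *
 *   struct fd f = fdget(fd);
 *   if (!f.file)
 *           return -EBADF;
 *   ... use f.file ...
 *   fdput(f);
 */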

extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
extern void set_close_on_exec(unsigned int fd, int flag);
extern bool get_close_on_exec(unsigned int fd);
extern int __get_unused_fd_flags(unsigned flags, unsigned long nofile);
extern int get_unused_fd_flags(unsigned flags);
extern void put_unused_fd(unsigned int fd);

extern void fd_install(unsigned int fd, struct file *file);

extern int __receive_fd(struct file *file, int *ufd,
unsigned int o_flags);

extern int receive_fd(struct file *file, unsigned int o_flags);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int receive_fd_user(struct file *file, int *ufd,
unsigned int o_flags)
{
if (ufd == ((void *)0))
return -14;
return __receive_fd(file, ufd, o_flags);
}
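/* receive_fd_user(): the -14 above is the preprocessed -EFAULT. */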
int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags);

extern void flush_delayed_fput(void);
extern void __fput_sync(struct file *);

extern unsigned int sysctl_nr_open_min, sysctl_nr_open_max;
# 6 "./include/linux/kernel_read_file.h" 2
# 22 "./include/linux/kernel_read_file.h"
enum kernel_read_file_id {
READING_UNKNOWN, READING_FIRMWARE, READING_MODULE, READING_KEXEC_IMAGE, READING_KEXEC_INITRAMFS, READING_POLICY, READING_X509_CERTIFICATE, READING_MAX_ID,
};

static const char * const kernel_read_file_str[] = {
"unknown", "firmware", "kernel-module", "kexec-image", "kexec-initramfs", "security-policy", "x509-certificate", "",
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *kernel_read_file_id_str(enum kernel_read_file_id id)
{
if ((unsigned int)id >= READING_MAX_ID)
return kernel_read_file_str[READING_UNKNOWN];

return kernel_read_file_str[id];
}
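/*
 * kernel_read_file_id_str() clamps out-of-range ids (including
 * READING_MAX_ID, whose table slot is the empty string) back to
 * "unknown"; kernel_load_data_id_str() further down does the same for
 * the LOADING_* table.
 */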

int kernel_read_file(struct file *file, loff_t offset,
void **buf, size_t buf_size,
size_t *file_size,
enum kernel_read_file_id id);
int kernel_read_file_from_path(const char *path, loff_t offset,
void **buf, size_t buf_size,
size_t *file_size,
enum kernel_read_file_id id);
int kernel_read_file_from_path_initns(const char *path, loff_t offset,
void **buf, size_t buf_size,
size_t *file_size,
enum kernel_read_file_id id);
int kernel_read_file_from_fd(int fd, loff_t offset,
void **buf, size_t buf_size,
size_t *file_size,
enum kernel_read_file_id id);
# 27 "./include/linux/security.h" 2








struct linux_binprm;
struct cred;
struct rlimit;
struct kernel_siginfo;
struct sembuf;
struct kern_ipc_perm;
struct audit_context;
struct super_block;
struct inode;
struct dentry;
struct file;
struct vfsmount;
struct path;
struct qstr;
struct iattr;
struct fown_struct;
struct file_operations;
struct msg_msg;
struct xattr;
struct kernfs_node;
struct xfrm_sec_ctx;
struct mm_struct;
struct fs_context;
struct fs_parameter;
enum fs_value_type;
struct watch;
struct watch_notification;
# 73 "./include/linux/security.h"
struct ctl_table;
struct audit_krule;
struct user_namespace;
struct timezone;

enum lsm_event {
LSM_POLICY_CHANGE,
};
# 106 "./include/linux/security.h"
enum lockdown_reason {
LOCKDOWN_NONE,
LOCKDOWN_MODULE_SIGNATURE,
LOCKDOWN_DEV_MEM,
LOCKDOWN_EFI_TEST,
LOCKDOWN_KEXEC,
LOCKDOWN_HIBERNATION,
LOCKDOWN_PCI_ACCESS,
LOCKDOWN_IOPORT,
LOCKDOWN_MSR,
LOCKDOWN_ACPI_TABLES,
LOCKDOWN_PCMCIA_CIS,
LOCKDOWN_TIOCSSERIAL,
LOCKDOWN_MODULE_PARAMETERS,
LOCKDOWN_MMIOTRACE,
LOCKDOWN_DEBUGFS,
LOCKDOWN_XMON_WR,
LOCKDOWN_BPF_WRITE_USER,
LOCKDOWN_INTEGRITY_MAX,
LOCKDOWN_KCORE,
LOCKDOWN_KPROBES,
LOCKDOWN_BPF_READ_KERNEL,
LOCKDOWN_PERF,
LOCKDOWN_TRACEFS,
LOCKDOWN_XMON_RW,
LOCKDOWN_XFRM_SECRET,
LOCKDOWN_CONFIDENTIALITY_MAX,
};

extern const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1];


extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
int cap, unsigned int opts);
extern int cap_settime(const struct timespec64 *ts, const struct timezone *tz);
extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
extern int cap_ptrace_traceme(struct task_struct *parent);
extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
extern int cap_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted);
extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
int cap_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
int cap_inode_removexattr(struct user_namespace *mnt_userns,
struct dentry *dentry, const char *name);
int cap_inode_need_killpriv(struct dentry *dentry);
int cap_inode_killpriv(struct user_namespace *mnt_userns,
struct dentry *dentry);
int cap_inode_getsecurity(struct user_namespace *mnt_userns,
struct inode *inode, const char *name, void **buffer,
bool alloc);
extern int cap_mmap_addr(unsigned long addr);
extern int cap_mmap_file(struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags);
extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags);
extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
extern int cap_task_setscheduler(struct task_struct *p);
extern int cap_task_setioprio(struct task_struct *p, int ioprio);
extern int cap_task_setnice(struct task_struct *p, int nice);
extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);

struct msghdr;
struct sk_buff;
struct sock;
struct sockaddr;
struct socket;
struct flowi_common;
struct dst_entry;
struct xfrm_selector;
struct xfrm_policy;
struct xfrm_state;
struct xfrm_user_sec_ctx;
struct seq_file;
struct sctp_association;


extern unsigned long mmap_min_addr;
extern unsigned long dac_mmap_min_addr;
# 212 "./include/linux/security.h"
struct sched_param;
struct request_sock;







extern int mmap_min_addr_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);



typedef int (*initxattrs) (struct inode *inode,
const struct xattr *xattr_array, void *fs_data);






enum kernel_load_data_id {
LOADING_UNKNOWN, LOADING_FIRMWARE, LOADING_MODULE, LOADING_KEXEC_IMAGE, LOADING_KEXEC_INITRAMFS, LOADING_POLICY, LOADING_X509_CERTIFICATE, LOADING_MAX_ID,
};

static const char * const kernel_load_data_str[] = {
"unknown", "firmware", "kernel-module", "kexec-image", "kexec-initramfs", "security-policy", "x509-certificate", "",
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *kernel_load_data_id_str(enum kernel_load_data_id id)
{
if ((unsigned)id >= LOADING_MAX_ID)
return kernel_load_data_str[LOADING_UNKNOWN];

return kernel_load_data_str[id];
}
# 476 "./include/linux/security.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int call_blocking_lsm_notifier(enum lsm_event event, void *data)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int register_blocking_lsm_notifier(struct notifier_block *nb)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int unregister_blocking_lsm_notifier(struct notifier_block *nb)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_free_mnt_opts(void **mnt_opts)
{
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_init(void)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int early_security_init(void)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_binder_set_context_mgr(const struct cred *mgr)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_binder_transaction(const struct cred *from,
const struct cred *to)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_binder_transfer_binder(const struct cred *from,
const struct cred *to)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_binder_transfer_file(const struct cred *from,
const struct cred *to,
struct file *file)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_ptrace_access_check(struct task_struct *child,
unsigned int mode)
{
return cap_ptrace_access_check(child, mode);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_ptrace_traceme(struct task_struct *parent)
{
return cap_ptrace_traceme(parent);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_capget(struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted)
{
return cap_capget(target, effective, inheritable, permitted);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_capset(struct cred *new,
const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted)
{
return cap_capset(new, old, effective, inheritable, permitted);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_capable(const struct cred *cred,
struct user_namespace *ns,
int cap,
unsigned int opts)
{
return cap_capable(cred, ns, cap, opts);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_quotactl(int cmds, int type, int id,
struct super_block *sb)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_quota_on(struct dentry *dentry)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_syslog(int type)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_settime64(const struct timespec64 *ts,
const struct timezone *tz)
{
return cap_settime(ts, tz);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_bprm_creds_for_exec(struct linux_binprm *bprm)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_bprm_creds_from_file(struct linux_binprm *bprm,
struct file *file)
{
return cap_bprm_creds_from_file(bprm, file);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_bprm_check(struct linux_binprm *bprm)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_bprm_committing_creds(struct linux_binprm *bprm)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_bprm_committed_creds(struct linux_binprm *bprm)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_fs_context_dup(struct fs_context *fc,
struct fs_context *src_fc)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_fs_context_parse_param(struct fs_context *fc,
struct fs_parameter *param)
{
return -519;
}
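/*
 * Preprocessed errno literals in these stubs: the -519 above is
 * -ENOPARAM (kernel-internal, used by fs_context parsing) and the -95
 * returns elsewhere in this block are -EOPNOTSUPP.
 */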

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sb_alloc(struct super_block *sb)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_sb_delete(struct super_block *sb)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_sb_free(struct super_block *sb)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sb_eat_lsm_opts(char *options,
void **mnt_opts)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sb_remount(struct super_block *sb,
void *mnt_opts)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sb_mnt_opts_compat(struct super_block *sb,
void *mnt_opts)
{
return 0;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sb_kern_mount(struct super_block *sb)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sb_show_options(struct seq_file *m,
struct super_block *sb)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sb_statfs(struct dentry *dentry)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sb_mount(const char *dev_name, const struct path *path,
const char *type, unsigned long flags,
void *data)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sb_umount(struct vfsmount *mnt, int flags)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sb_pivotroot(const struct path *old_path,
const struct path *new_path)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sb_set_mnt_opts(struct super_block *sb,
void *mnt_opts,
unsigned long kern_flags,
unsigned long *set_kern_flags)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb,
unsigned long kern_flags,
unsigned long *set_kern_flags)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_move_mount(const struct path *from_path,
const struct path *to_path)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_path_notify(const struct path *path, u64 mask,
unsigned int obj_type)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_alloc(struct inode *inode)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_inode_free(struct inode *inode)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_dentry_init_security(struct dentry *dentry,
int mode,
const struct qstr *name,
const char **xattr_name,
void **ctx,
u32 *ctxlen)
{
return -95;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_dentry_create_files_as(struct dentry *dentry,
int mode, struct qstr *name,
const struct cred *old,
struct cred *new)
{
return 0;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_init_security(struct inode *inode,
struct inode *dir,
const struct qstr *qstr,
const initxattrs xattrs,
void *fs_data)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_init_security_anon(struct inode *inode,
const struct qstr *name,
const struct inode *context_inode)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_old_inode_init_security(struct inode *inode,
struct inode *dir,
const struct qstr *qstr,
const char **name,
void **value, size_t *len)
{
return -95;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_create(struct inode *dir,
struct dentry *dentry,
umode_t mode)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_link(struct dentry *old_dentry,
struct inode *dir,
struct dentry *new_dentry)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_unlink(struct inode *dir,
struct dentry *dentry)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_symlink(struct inode *dir,
struct dentry *dentry,
const char *old_name)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_mkdir(struct inode *dir,
struct dentry *dentry,
int mode)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_rmdir(struct inode *dir,
struct dentry *dentry)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_mknod(struct inode *dir,
struct dentry *dentry,
int mode, dev_t dev)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_rename(struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
struct dentry *new_dentry,
unsigned int flags)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_readlink(struct dentry *dentry)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_follow_link(struct dentry *dentry,
struct inode *inode,
bool rcu)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_permission(struct inode *inode, int mask)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_setattr(struct dentry *dentry,
struct iattr *attr)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_getattr(const struct path *path)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_setxattr(struct user_namespace *mnt_userns,
struct dentry *dentry, const char *name, const void *value,
size_t size, int flags)
{
return cap_inode_setxattr(dentry, name, value, size, flags);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_inode_post_setxattr(struct dentry *dentry,
const char *name, const void *value, size_t size, int flags)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_getxattr(struct dentry *dentry,
const char *name)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_listxattr(struct dentry *dentry)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_removexattr(struct user_namespace *mnt_userns,
struct dentry *dentry,
const char *name)
{
return cap_inode_removexattr(mnt_userns, dentry, name);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_need_killpriv(struct dentry *dentry)
{
return cap_inode_need_killpriv(dentry);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_killpriv(struct user_namespace *mnt_userns,
struct dentry *dentry)
{
return cap_inode_killpriv(mnt_userns, dentry);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_getsecurity(struct user_namespace *mnt_userns,
struct inode *inode,
const char *name, void **buffer,
bool alloc)
{
return cap_inode_getsecurity(mnt_userns, inode, name, buffer, alloc);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
{
return -95;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_inode_getsecid(struct inode *inode, u32 *secid)
{
*secid = 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_copy_up(struct dentry *src, struct cred **new)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_kernfs_init_security(struct kernfs_node *kn_dir,
struct kernfs_node *kn)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_copy_up_xattr(const char *name)
{
return -95;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_file_permission(struct file *file, int mask)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_file_alloc(struct file *file)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_file_free(struct file *file)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_file_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_mmap_file(struct file *file, unsigned long prot,
unsigned long flags)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_mmap_addr(unsigned long addr)
{
return cap_mmap_addr(addr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_file_mprotect(struct vm_area_struct *vma,
unsigned long reqprot,
unsigned long prot)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_file_lock(struct file *file, unsigned int cmd)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_file_fcntl(struct file *file, unsigned int cmd,
unsigned long arg)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_file_set_fowner(struct file *file)
{
return;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown,
int sig)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_file_receive(struct file *file)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_file_open(struct file *file)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_task_alloc(struct task_struct *task,
unsigned long clone_flags)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_task_free(struct task_struct *task)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_cred_free(struct cred *cred)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_prepare_creds(struct cred *new,
const struct cred *old,
gfp_t gfp)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_transfer_creds(struct cred *new,
const struct cred *old)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_cred_getsecid(const struct cred *c, u32 *secid)
{
*secid = 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_kernel_act_as(struct cred *cred, u32 secid)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_kernel_create_files_as(struct cred *cred,
struct inode *inode)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_kernel_module_request(char *kmod_name)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_kernel_load_data(enum kernel_load_data_id id, bool contents)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_kernel_post_load_data(char *buf, loff_t size,
enum kernel_load_data_id id,
char *description)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_kernel_read_file(struct file *file,
enum kernel_read_file_id id,
bool contents)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_kernel_post_read_file(struct file *file,
char *buf, loff_t size,
enum kernel_read_file_id id)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_task_fix_setuid(struct cred *new,
const struct cred *old,
int flags)
{
return cap_task_fix_setuid(new, old, flags);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_task_fix_setgid(struct cred *new,
const struct cred *old,
int flags)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_task_setpgid(struct task_struct *p, pid_t pgid)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_task_getpgid(struct task_struct *p)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_task_getsid(struct task_struct *p)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_current_getsecid_subj(u32 *secid)
{
*secid = 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_task_getsecid_obj(struct task_struct *p, u32 *secid)
{
*secid = 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_task_setnice(struct task_struct *p, int nice)
{
return cap_task_setnice(p, nice);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_task_setioprio(struct task_struct *p, int ioprio)
{
return cap_task_setioprio(p, ioprio);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_task_getioprio(struct task_struct *p)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_task_prlimit(const struct cred *cred,
const struct cred *tcred,
unsigned int flags)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_task_setrlimit(struct task_struct *p,
unsigned int resource,
struct rlimit *new_rlim)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_task_setscheduler(struct task_struct *p)
{
return cap_task_setscheduler(p);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_task_getscheduler(struct task_struct *p)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_task_movememory(struct task_struct *p)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_task_kill(struct task_struct *p,
struct kernel_siginfo *info, int sig,
const struct cred *cred)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_task_prctl(int option, unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
unsigned long arg5)
{
return cap_task_prctl(option, arg2, arg3, arg4, arg5);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_task_to_inode(struct task_struct *p, struct inode *inode)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_ipc_permission(struct kern_ipc_perm *ipcp,
short flag)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
{
*secid = 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_msg_msg_alloc(struct msg_msg *msg)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_msg_msg_free(struct msg_msg *msg)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_msg_queue_alloc(struct kern_ipc_perm *msq)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_msg_queue_free(struct kern_ipc_perm *msq)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_msg_queue_associate(struct kern_ipc_perm *msq,
int msqflg)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_msg_queue_msgsnd(struct kern_ipc_perm *msq,
struct msg_msg *msg, int msqflg)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_msg_queue_msgrcv(struct kern_ipc_perm *msq,
struct msg_msg *msg,
struct task_struct *target,
long type, int mode)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_shm_alloc(struct kern_ipc_perm *shp)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_shm_free(struct kern_ipc_perm *shp)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_shm_associate(struct kern_ipc_perm *shp,
int shmflg)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_shm_shmat(struct kern_ipc_perm *shp,
char *shmaddr, int shmflg)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sem_alloc(struct kern_ipc_perm *sma)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_sem_free(struct kern_ipc_perm *sma)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sem_associate(struct kern_ipc_perm *sma, int semflg)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sem_semctl(struct kern_ipc_perm *sma, int cmd)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sem_semop(struct kern_ipc_perm *sma,
struct sembuf *sops, unsigned nsops,
int alter)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_d_instantiate(struct dentry *dentry,
struct inode *inode)
{ }
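/* In the two procattr stubs below, -22 is the expanded value of -EINVAL. */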

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_getprocattr(struct task_struct *p, const char *lsm,
char *name, char **value)
{
return -22;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_setprocattr(const char *lsm, char *name,
void *value, size_t size)
{
return -22;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_netlink_send(struct sock *sk, struct sk_buff *skb)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_ismaclabel(const char *name)
{
return 0;
}
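/* In the secctx/secid stubs that follow, -95 is the expanded value of -EOPNOTSUPP. */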

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
return -95;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_secctx_to_secid(const char *secdata,
u32 seclen,
u32 *secid)
{
return -95;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_release_secctx(char *secdata, u32 seclen)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_inode_invalidate_secctx(struct inode *inode)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
{
return -95;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
{
return -95;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
{
return -95;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_locked_down(enum lockdown_reason what)
{
return 0;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_post_notification(const struct cred *w_cred,
const struct cred *cred,
struct watch_notification *n)
{
return 0;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_watch_key(struct key *key)
{
return 0;
}
# 1429 "./include/linux/security.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_unix_stream_connect(struct sock *sock,
struct sock *other,
struct sock *newsk)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_unix_may_send(struct socket *sock,
struct socket *other)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_socket_create(int family, int type,
int protocol, int kern)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_socket_post_create(struct socket *sock,
int family,
int type,
int protocol, int kern)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_socket_socketpair(struct socket *socka,
struct socket *sockb)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_socket_bind(struct socket *sock,
struct sockaddr *address,
int addrlen)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_socket_connect(struct socket *sock,
struct sockaddr *address,
int addrlen)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_socket_listen(struct socket *sock, int backlog)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_socket_accept(struct socket *sock,
struct socket *newsock)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_socket_sendmsg(struct socket *sock,
struct msghdr *msg, int size)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_socket_recvmsg(struct socket *sock,
struct msghdr *msg, int size,
int flags)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_socket_getsockname(struct socket *sock)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_socket_getpeername(struct socket *sock)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_socket_getsockopt(struct socket *sock,
int level, int optname)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_socket_setsockopt(struct socket *sock,
int level, int optname)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_socket_shutdown(struct socket *sock, int how)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sock_rcv_skb(struct sock *sk,
struct sk_buff *skb)
{
return 0;
}
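/* In the two getpeersec stubs below, -92 is the expanded value of -ENOPROTOOPT. */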

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_socket_getpeersec_stream(struct socket *sock, char *optval,
int *optlen, unsigned len)
{
return -92;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
{
return -92;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_sk_free(struct sock *sk)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_sk_clone(const struct sock *sk, struct sock *newsk)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_sk_classify_flow(struct sock *sk,
struct flowi_common *flic)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_req_classify_flow(const struct request_sock *req,
struct flowi_common *flic)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_sock_graft(struct sock *sk, struct socket *parent)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_inet_conn_request(const struct sock *sk,
struct sk_buff *skb, struct request_sock *req)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_inet_csk_clone(struct sock *newsk,
const struct request_sock *req)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_inet_conn_established(struct sock *sk,
struct sk_buff *skb)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_secmark_relabel_packet(u32 secid)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_secmark_refcount_inc(void)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_secmark_refcount_dec(void)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_tun_dev_alloc_security(void **security)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_tun_dev_free_security(void *security)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_tun_dev_create(void)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_tun_dev_attach_queue(void *security)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_tun_dev_attach(struct sock *sk, void *security)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_tun_dev_open(void *security)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sctp_assoc_request(struct sctp_association *asoc,
struct sk_buff *skb)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sctp_bind_connect(struct sock *sk, int optname,
struct sockaddr *address,
int addrlen)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_sctp_sk_clone(struct sctp_association *asoc,
struct sock *sk,
struct sock *newsk)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_sctp_assoc_established(struct sctp_association *asoc,
struct sk_buff *skb)
{
return 0;
}
# 1660 "./include/linux/security.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_ib_endport_manage_subnet(void *sec, const char *dev_name, u8 port_num)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_ib_alloc_security(void **sec)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_ib_free_security(void *sec)
{
}
# 1701 "./include/linux/security.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
struct xfrm_user_sec_ctx *sec_ctx,
gfp_t gfp)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_xfrm_policy_clone(struct xfrm_sec_ctx *old, struct xfrm_sec_ctx **new_ctxp)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_xfrm_state_alloc(struct xfrm_state *x,
struct xfrm_user_sec_ctx *sec_ctx)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_xfrm_state_free(struct xfrm_state *x)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_xfrm_state_delete(struct xfrm_state *x)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp,
const struct flowi_common *flic)
{
return 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_skb_classify_flow(struct sk_buff *skb,
struct flowi_common *flic)
{
}
# 1785 "./include/linux/security.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_path_unlink(const struct path *dir, struct dentry *dentry)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_path_mkdir(const struct path *dir, struct dentry *dentry,
umode_t mode)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_path_rmdir(const struct path *dir, struct dentry *dentry)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_path_mknod(const struct path *dir, struct dentry *dentry,
umode_t mode, unsigned int dev)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_path_truncate(const struct path *path)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_path_symlink(const struct path *dir, struct dentry *dentry,
const char *old_name)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_path_link(struct dentry *old_dentry,
const struct path *new_dir,
struct dentry *new_dentry)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_path_rename(const struct path *old_dir,
struct dentry *old_dentry,
const struct path *new_dir,
struct dentry *new_dentry,
unsigned int flags)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_path_chmod(const struct path *path, umode_t mode)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_path_chroot(const struct path *path)
{
return 0;
}
# 1861 "./include/linux/security.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_key_alloc(struct key *key,
const struct cred *cred,
unsigned long flags)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_key_free(struct key *key)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_key_permission(key_ref_t key_ref,
const struct cred *cred,
enum key_need_perm need_perm)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_key_getsecurity(struct key *key, char **_buffer)
{
*_buffer = ((void *)0);
return 0;
}
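/* In the securityfs stubs below, ERR_PTR(-19) is ERR_PTR(-ENODEV) after expansion. */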
# 1934 "./include/linux/security.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct dentry *securityfs_create_dir(const char *name,
struct dentry *parent)
{
return ERR_PTR(-19);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct dentry *securityfs_create_file(const char *name,
umode_t mode,
struct dentry *parent,
void *data,
const struct file_operations *fops)
{
return ERR_PTR(-19);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct dentry *securityfs_create_symlink(const char *name,
struct dentry *parent,
const char *target,
const struct inode_operations *iops)
{
return ERR_PTR(-19);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void securityfs_remove(struct dentry *dentry)
{}




union bpf_attr;
struct bpf_map;
struct bpf_prog;
struct bpf_prog_aux;
# 1976 "./include/linux/security.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_bpf(int cmd, union bpf_attr *attr,
unsigned int size)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_bpf_map(struct bpf_map *map, fmode_t fmode)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_bpf_prog(struct bpf_prog *prog)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_bpf_map_alloc(struct bpf_map *map)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_bpf_map_free(struct bpf_map *map)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_bpf_prog_free(struct bpf_prog_aux *aux)
{ }




struct perf_event_attr;
struct perf_event;
# 2021 "./include/linux/security.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_perf_event_open(struct perf_event_attr *attr,
int type)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_perf_event_alloc(struct perf_event *event)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void security_perf_event_free(struct perf_event *event)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_perf_event_read(struct perf_event *event)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_perf_event_write(struct perf_event *event)
{
return 0;
}
# 2053 "./include/linux/security.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_uring_override_creds(const struct cred *new)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int security_uring_sqpoll(void)
{
return 0;
}
# 9 "./include/net/scm.h" 2
# 18 "./include/net/scm.h"
struct scm_creds {
u32 pid;
kuid_t uid;
kgid_t gid;
};
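/*
 * In scm_fp_list below, 253 is SCM_MAX_FD after expansion. The blank lines
 * inside scm_cookie are where the CONFIG_SECURITY_NETWORK secid field was
 * compiled out.
 */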

struct scm_fp_list {
short count;
short max;
struct user_struct *user;
struct file *fp[253];
};

struct scm_cookie {
struct pid *pid;
struct scm_fp_list *fp;
struct scm_creds creds;



};

void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm);
void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm);
int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm);
void __scm_destroy(struct scm_cookie *scm);
struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl);







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm)
{ }


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void scm_set_cred(struct scm_cookie *scm,
struct pid *pid, kuid_t uid, kgid_t gid)
{
scm->pid = get_pid(pid);
scm->creds.pid = pid_vnr(pid);
scm->creds.uid = uid;
scm->creds.gid = gid;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void scm_destroy_cred(struct scm_cookie *scm)
{
put_pid(scm->pid);
scm->pid = ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void scm_destroy(struct scm_cookie *scm)
{
scm_destroy_cred(scm);
if (scm->fp)
__scm_destroy(scm);
}
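/*
 * In scm_send below, (kuid_t){ -1 } and (kgid_t){ -1 } are INVALID_UID and
 * INVALID_GID after expansion, and the ({ ... }) statement-expression blobs
 * on the scm_set_cred() line are current_uid() and current_gid() with their
 * RCU lockdep checks compiled out.
 */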

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int scm_send(struct socket *sock, struct msghdr *msg,
struct scm_cookie *scm, bool forcecreds)
{
memset(scm, 0, sizeof(*scm));
scm->creds.uid = (kuid_t){ -1 };
scm->creds.gid = (kgid_t){ -1 };
if (forcecreds)
scm_set_cred(scm, task_tgid(get_current()), (({ ({ do { } while (0 && (!((1)))); ; ((typeof(*(get_current()->cred)) *)((get_current()->cred))); })->uid; })), (({ ({ do { } while (0 && (!((1)))); ; ((typeof(*(get_current()->cred)) *)((get_current()->cred))); })->gid; })));
unix_get_peersec_dgram(sock, scm);
if (msg->msg_controllen <= 0)
return 0;
return __scm_send(sock, msg, scm);
}
# 109 "./include/net/scm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
{ }
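/*
 * Expanded constants in scm_recv below: bit 3 of sock->flags is
 * SOCK_PASSCRED, 8 is MSG_CTRUNC, put_cmsg(msg, 1, 0x02, ...) is
 * put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, ...), and the ({ ... }) blob
 * is current_user_ns() after expansion.
 */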


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void scm_recv(struct socket *sock, struct msghdr *msg,
struct scm_cookie *scm, int flags)
{
if (!msg->msg_control) {
if (arch_test_bit(3, &sock->flags) || scm->fp)
msg->msg_flags |= 8;
scm_destroy(scm);
return;
}

if (arch_test_bit(3, &sock->flags)) {
struct user_namespace *current_ns = (({ ({ do { } while (0 && (!((1)))); ; ((typeof(*(get_current()->cred)) *)((get_current()->cred))); })->user_ns; }));
struct ucred ucreds = {
.pid = scm->creds.pid,
.uid = from_kuid_munged(current_ns, scm->creds.uid),
.gid = from_kgid_munged(current_ns, scm->creds.gid),
};
put_cmsg(msg, 1, 0x02, sizeof(ucreds), &ucreds);
}

scm_destroy_cred(scm);

scm_passec(sock, msg, scm);

if (!scm->fp)
return;

scm_detach_fds(msg, scm);
}
# 10 "./include/linux/netlink.h" 2
# 1 "./include/uapi/linux/netlink.h" 1
# 37 "./include/uapi/linux/netlink.h"
struct sockaddr_nl {
__kernel_sa_family_t nl_family;
unsigned short nl_pad;
__u32 nl_pid;
__u32 nl_groups;
};

struct nlmsghdr {
__u32 nlmsg_len;
__u16 nlmsg_type;
__u16 nlmsg_flags;
__u32 nlmsg_seq;
__u32 nlmsg_pid;
};
# 110 "./include/uapi/linux/netlink.h"
struct nlmsgerr {
int error;
struct nlmsghdr msg;
# 122 "./include/uapi/linux/netlink.h"
};
# 137 "./include/uapi/linux/netlink.h"
enum nlmsgerr_attrs {
NLMSGERR_ATTR_UNUSED,
NLMSGERR_ATTR_MSG,
NLMSGERR_ATTR_OFFS,
NLMSGERR_ATTR_COOKIE,
NLMSGERR_ATTR_POLICY,

__NLMSGERR_ATTR_MAX,
NLMSGERR_ATTR_MAX = __NLMSGERR_ATTR_MAX - 1
};
# 163 "./include/uapi/linux/netlink.h"
struct nl_pktinfo {
__u32 group;
};

struct nl_mmap_req {
unsigned int nm_block_size;
unsigned int nm_block_nr;
unsigned int nm_frame_size;
unsigned int nm_frame_nr;
};

struct nl_mmap_hdr {
unsigned int nm_status;
unsigned int nm_len;
__u32 nm_group;

__u32 nm_pid;
__u32 nm_uid;
__u32 nm_gid;
};
# 200 "./include/uapi/linux/netlink.h"
enum {
NETLINK_UNCONNECTED = 0,
NETLINK_CONNECTED,
};
# 214 "./include/uapi/linux/netlink.h"
struct nlattr {
__u16 nla_len;
__u16 nla_type;
};
# 250 "./include/uapi/linux/netlink.h"
struct nla_bitfield32 {
__u32 value;
__u32 selector;
};
# 287 "./include/uapi/linux/netlink.h"
enum netlink_attribute_type {
NL_ATTR_TYPE_INVALID,

NL_ATTR_TYPE_FLAG,

NL_ATTR_TYPE_U8,
NL_ATTR_TYPE_U16,
NL_ATTR_TYPE_U32,
NL_ATTR_TYPE_U64,

NL_ATTR_TYPE_S8,
NL_ATTR_TYPE_S16,
NL_ATTR_TYPE_S32,
NL_ATTR_TYPE_S64,

NL_ATTR_TYPE_BINARY,
NL_ATTR_TYPE_STRING,
NL_ATTR_TYPE_NUL_STRING,

NL_ATTR_TYPE_NESTED,
NL_ATTR_TYPE_NESTED_ARRAY,

NL_ATTR_TYPE_BITFIELD32,
};
# 340 "./include/uapi/linux/netlink.h"
enum netlink_policy_type_attr {
NL_POLICY_TYPE_ATTR_UNSPEC,
NL_POLICY_TYPE_ATTR_TYPE,
NL_POLICY_TYPE_ATTR_MIN_VALUE_S,
NL_POLICY_TYPE_ATTR_MAX_VALUE_S,
NL_POLICY_TYPE_ATTR_MIN_VALUE_U,
NL_POLICY_TYPE_ATTR_MAX_VALUE_U,
NL_POLICY_TYPE_ATTR_MIN_LENGTH,
NL_POLICY_TYPE_ATTR_MAX_LENGTH,
NL_POLICY_TYPE_ATTR_POLICY_IDX,
NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE,
NL_POLICY_TYPE_ATTR_BITFIELD32_MASK,
NL_POLICY_TYPE_ATTR_PAD,
NL_POLICY_TYPE_ATTR_MASK,


__NL_POLICY_TYPE_ATTR_MAX,
NL_POLICY_TYPE_ATTR_MAX = __NL_POLICY_TYPE_ATTR_MAX - 1
};
# 11 "./include/linux/netlink.h" 2

struct net;

void do_trace_netlink_extack(const char *msg);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
{
return (struct nlmsghdr *)skb->data;
}

enum netlink_skb_flags {
NETLINK_SKB_DST = 0x8,
};

struct netlink_skb_parms {
struct scm_creds creds;
__u32 portid;
__u32 dst_group;
__u32 flags;
struct sock *sk;
bool nsid_is_set;
int nsid;
};





void netlink_table_grab(void);
void netlink_table_ungrab(void);





struct netlink_kernel_cfg {
unsigned int groups;
unsigned int flags;
void (*input)(struct sk_buff *skb);
struct mutex *cb_mutex;
int (*bind)(struct net *net, int group);
void (*unbind)(struct net *net, int group);
bool (*compare)(struct net *net, struct sock *sk);
};

struct sock *__netlink_kernel_create(struct net *net, int unit,
struct module *module,
struct netlink_kernel_cfg *cfg);
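/*
 * In netlink_kernel_create below, ((struct module *)0) is THIS_MODULE after
 * expansion; it is NULL because this translation unit is built into the
 * kernel rather than as a module.
 */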
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sock *
netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
{
return __netlink_kernel_create(net, unit, ((struct module *)0), cfg);
}
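/* In netlink_ext_ack below, cookie[20] is u8 cookie[NETLINK_MAX_COOKIE_LEN] after expansion. */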
# 77 "./include/linux/netlink.h"
struct netlink_ext_ack {
const char *_msg;
const struct nlattr *bad_attr;
const struct nla_policy *policy;
u8 cookie[20];
u8 cookie_len;
};
# 129 "./include/linux/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack,
u64 cookie)
{
if (!extack)
return;
memcpy(extack->cookie, &cookie, sizeof(cookie));
extack->cookie_len = sizeof(cookie);
}

void netlink_kernel_release(struct sock *sk);
int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
int netlink_change_ngroups(struct sock *sk, unsigned int groups);
void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
const struct netlink_ext_ack *extack);
int netlink_has_listeners(struct sock *sk, unsigned int group);
bool netlink_strict_get_check(struct sk_buff *skb);

int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
__u32 group, gfp_t allocation);
int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
int netlink_register_notifier(struct notifier_block *nb);
int netlink_unregister_notifier(struct notifier_block *nb);


struct sock *netlink_getsockbyfilp(struct file *filp);
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
long *timeo, struct sock *ssk);
void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
int netlink_sendskb(struct sock *sk, struct sk_buff *skb);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *
netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
struct sk_buff *nskb;

nskb = skb_clone(skb, gfp_mask);
if (!nskb)
return ((void *)0);


if (is_vmalloc_addr(skb->head))
nskb->destructor = skb->destructor;

return nskb;
}
# 192 "./include/linux/netlink.h"
struct netlink_callback {
struct sk_buff *skb;
const struct nlmsghdr *nlh;
int (*dump)(struct sk_buff * skb,
struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
void *data;

struct module *module;
struct netlink_ext_ack *extack;
u16 family;
u16 answer_flags;
u32 min_dump_alloc;
unsigned int prev_seq, seq;
bool strict_check;
union {
u8 ctx[48];




long args[6];
};
};

struct netlink_notify {
struct net *net;
u32 portid;
int protocol;
};

struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags);

struct netlink_dump_control {
int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff *skb, struct netlink_callback *);
int (*done)(struct netlink_callback *);
void *data;
struct module *module;
u32 min_dump_alloc;
};

int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct netlink_dump_control *control);
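/*
 * In netlink_dump_start below, THIS_MODULE has likewise expanded to
 * ((struct module *)0), so the !control->module fallback assignment is a
 * no-op in this built-in configuration.
 */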
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct netlink_dump_control *control)
{
if (!control->module)
control->module = ((struct module *)0);

return __netlink_dump_start(ssk, skb, nlh, control);
}

struct netlink_tap {
struct net_device *dev;
struct module *module;
struct list_head list;
};

int netlink_add_tap(struct netlink_tap *nt);
int netlink_remove_tap(struct netlink_tap *nt);

bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
struct user_namespace *ns, int cap);
bool netlink_ns_capable(const struct sk_buff *skb,
struct user_namespace *ns, int cap);
bool netlink_capable(const struct sk_buff *skb, int cap);
bool netlink_net_capable(const struct sk_buff *skb, int cap);
# 7 "./include/uapi/linux/neighbour.h" 2

struct ndmsg {
__u8 ndm_family;
__u8 ndm_pad1;
__u16 ndm_pad2;
__s32 ndm_ifindex;
__u16 ndm_state;
__u8 ndm_flags;
__u8 ndm_type;
};

enum {
NDA_UNSPEC,
NDA_DST,
NDA_LLADDR,
NDA_CACHEINFO,
NDA_PROBES,
NDA_VLAN,
NDA_PORT,
NDA_VNI,
NDA_IFINDEX,
NDA_MASTER,
NDA_LINK_NETNSID,
NDA_SRC_VNI,
NDA_PROTOCOL,
NDA_NH_ID,
NDA_FDB_EXT_ATTRS,
NDA_FLAGS_EXT,
__NDA_MAX
};
# 89 "./include/uapi/linux/neighbour.h"
struct nda_cacheinfo {
__u32 ndm_confirmed;
__u32 ndm_used;
__u32 ndm_updated;
__u32 ndm_refcnt;
};
# 121 "./include/uapi/linux/neighbour.h"
struct ndt_stats {
__u64 ndts_allocs;
__u64 ndts_destroys;
__u64 ndts_hash_grows;
__u64 ndts_res_failed;
__u64 ndts_lookups;
__u64 ndts_hits;
__u64 ndts_rcv_probes_mcast;
__u64 ndts_rcv_probes_ucast;
__u64 ndts_periodic_gc_runs;
__u64 ndts_forced_gc_runs;
__u64 ndts_table_fulls;
};

enum {
NDTPA_UNSPEC,
NDTPA_IFINDEX,
NDTPA_REFCNT,
NDTPA_REACHABLE_TIME,
NDTPA_BASE_REACHABLE_TIME,
NDTPA_RETRANS_TIME,
NDTPA_GC_STALETIME,
NDTPA_DELAY_PROBE_TIME,
NDTPA_QUEUE_LEN,
NDTPA_APP_PROBES,
NDTPA_UCAST_PROBES,
NDTPA_MCAST_PROBES,
NDTPA_ANYCAST_DELAY,
NDTPA_PROXY_DELAY,
NDTPA_PROXY_QLEN,
NDTPA_LOCKTIME,
NDTPA_QUEUE_LENBYTES,
NDTPA_MCAST_REPROBES,
NDTPA_PAD,
__NDTPA_MAX
};


struct ndtmsg {
__u8 ndtm_family;
__u8 ndtm_pad1;
__u16 ndtm_pad2;
};

struct ndt_config {
__u16 ndtc_key_len;
__u16 ndtc_entry_size;
__u32 ndtc_entries;
__u32 ndtc_last_flush;
__u32 ndtc_last_rand;
__u32 ndtc_hash_rnd;
__u32 ndtc_hash_mask;
__u32 ndtc_hash_chain_gc;
__u32 ndtc_proxy_qlen;
};

enum {
NDTA_UNSPEC,
NDTA_NAME,
NDTA_THRESH1,
NDTA_THRESH2,
NDTA_THRESH3,
NDTA_CONFIG,
NDTA_PARMS,
NDTA_STATS,
NDTA_GC_INTERVAL,
NDTA_PAD,
__NDTA_MAX
};






enum {
FDB_NOTIFY_BIT = (1 << 0),
FDB_NOTIFY_INACTIVE_BIT = (1 << 1)
};







enum {
NFEA_UNSPEC,
NFEA_ACTIVITY_NOTIFY,
NFEA_DONT_REFRESH,
__NFEA_MAX
};
# 47 "./include/linux/netdevice.h" 2
# 1 "./include/uapi/linux/netdevice.h" 1
# 30 "./include/uapi/linux/netdevice.h"
# 1 "./include/linux/if_ether.h" 1
# 22 "./include/linux/if_ether.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct ethhdr *eth_hdr(const struct sk_buff *skb)
{
return (struct ethhdr *)skb_mac_header(skb);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct ethhdr *skb_eth_hdr(const struct sk_buff *skb)
{
return (struct ethhdr *)skb->data;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
{
return (struct ethhdr *)skb_inner_mac_header(skb);
}

int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);

extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
# 31 "./include/uapi/linux/netdevice.h" 2

# 1 "./include/linux/if_link.h" 1




# 1 "./include/uapi/linux/if_link.h" 1








struct rtnl_link_stats {
__u32 rx_packets;
__u32 tx_packets;
__u32 rx_bytes;
__u32 tx_bytes;
__u32 rx_errors;
__u32 tx_errors;
__u32 rx_dropped;
__u32 tx_dropped;
__u32 multicast;
__u32 collisions;

__u32 rx_length_errors;
__u32 rx_over_errors;
__u32 rx_crc_errors;
__u32 rx_frame_errors;
__u32 rx_fifo_errors;
__u32 rx_missed_errors;


__u32 tx_aborted_errors;
__u32 tx_carrier_errors;
__u32 tx_fifo_errors;
__u32 tx_heartbeat_errors;
__u32 tx_window_errors;


__u32 rx_compressed;
__u32 tx_compressed;

__u32 rx_nohandler;
};
# 215 "./include/uapi/linux/if_link.h"
struct rtnl_link_stats64 {
__u64 rx_packets;
__u64 tx_packets;
__u64 rx_bytes;
__u64 tx_bytes;
__u64 rx_errors;
__u64 tx_errors;
__u64 rx_dropped;
__u64 tx_dropped;
__u64 multicast;
__u64 collisions;


__u64 rx_length_errors;
__u64 rx_over_errors;
__u64 rx_crc_errors;
__u64 rx_frame_errors;
__u64 rx_fifo_errors;
__u64 rx_missed_errors;


__u64 tx_aborted_errors;
__u64 tx_carrier_errors;
__u64 tx_fifo_errors;
__u64 tx_heartbeat_errors;
__u64 tx_window_errors;


__u64 rx_compressed;
__u64 tx_compressed;
__u64 rx_nohandler;
};




struct rtnl_hw_stats64 {
__u64 rx_packets;
__u64 tx_packets;
__u64 rx_bytes;
__u64 tx_bytes;
__u64 rx_errors;
__u64 tx_errors;
__u64 rx_dropped;
__u64 tx_dropped;
__u64 multicast;
};


struct rtnl_link_ifmap {
__u64 mem_start;
__u64 mem_end;
__u64 base_addr;
__u16 irq;
__u8 dma;
__u8 port;
};
# 291 "./include/uapi/linux/if_link.h"
enum {
IFLA_UNSPEC,
IFLA_ADDRESS,
IFLA_BROADCAST,
IFLA_IFNAME,
IFLA_MTU,
IFLA_LINK,
IFLA_QDISC,
IFLA_STATS,
IFLA_COST,

IFLA_PRIORITY,

IFLA_MASTER,

IFLA_WIRELESS,

IFLA_PROTINFO,

IFLA_TXQLEN,

IFLA_MAP,

IFLA_WEIGHT,

IFLA_OPERSTATE,
IFLA_LINKMODE,
IFLA_LINKINFO,

IFLA_NET_NS_PID,
IFLA_IFALIAS,
IFLA_NUM_VF,
IFLA_VFINFO_LIST,
IFLA_STATS64,
IFLA_VF_PORTS,
IFLA_PORT_SELF,
IFLA_AF_SPEC,
IFLA_GROUP,
IFLA_NET_NS_FD,
IFLA_EXT_MASK,
IFLA_PROMISCUITY,

IFLA_NUM_TX_QUEUES,
IFLA_NUM_RX_QUEUES,
IFLA_CARRIER,
IFLA_PHYS_PORT_ID,
IFLA_CARRIER_CHANGES,
IFLA_PHYS_SWITCH_ID,
IFLA_LINK_NETNSID,
IFLA_PHYS_PORT_NAME,
IFLA_PROTO_DOWN,
IFLA_GSO_MAX_SEGS,
IFLA_GSO_MAX_SIZE,
IFLA_PAD,
IFLA_XDP,
IFLA_EVENT,
IFLA_NEW_NETNSID,
IFLA_IF_NETNSID,
IFLA_TARGET_NETNSID = IFLA_IF_NETNSID,
IFLA_CARRIER_UP_COUNT,
IFLA_CARRIER_DOWN_COUNT,
IFLA_NEW_IFINDEX,
IFLA_MIN_MTU,
IFLA_MAX_MTU,
IFLA_PROP_LIST,
IFLA_ALT_IFNAME,
IFLA_PERM_ADDRESS,
IFLA_PROTO_DOWN_REASON,




IFLA_PARENT_DEV_NAME,
IFLA_PARENT_DEV_BUS_NAME,
IFLA_GRO_MAX_SIZE,

__IFLA_MAX
};




enum {
IFLA_PROTO_DOWN_REASON_UNSPEC,
IFLA_PROTO_DOWN_REASON_MASK,
IFLA_PROTO_DOWN_REASON_VALUE,

__IFLA_PROTO_DOWN_REASON_CNT,
IFLA_PROTO_DOWN_REASON_MAX = __IFLA_PROTO_DOWN_REASON_CNT - 1
};







enum {
IFLA_INET_UNSPEC,
IFLA_INET_CONF,
__IFLA_INET_MAX,
};
# 426 "./include/uapi/linux/if_link.h"
enum {
IFLA_INET6_UNSPEC,
IFLA_INET6_FLAGS,
IFLA_INET6_CONF,
IFLA_INET6_STATS,
IFLA_INET6_MCAST,
IFLA_INET6_CACHEINFO,
IFLA_INET6_ICMP6STATS,
IFLA_INET6_TOKEN,
IFLA_INET6_ADDR_GEN_MODE,
IFLA_INET6_RA_MTU,
__IFLA_INET6_MAX
};



enum in6_addr_gen_mode {
IN6_ADDR_GEN_MODE_EUI64,
IN6_ADDR_GEN_MODE_NONE,
IN6_ADDR_GEN_MODE_STABLE_PRIVACY,
IN6_ADDR_GEN_MODE_RANDOM,
};



enum {
IFLA_BR_UNSPEC,
IFLA_BR_FORWARD_DELAY,
IFLA_BR_HELLO_TIME,
IFLA_BR_MAX_AGE,
IFLA_BR_AGEING_TIME,
IFLA_BR_STP_STATE,
IFLA_BR_PRIORITY,
IFLA_BR_VLAN_FILTERING,
IFLA_BR_VLAN_PROTOCOL,
IFLA_BR_GROUP_FWD_MASK,
IFLA_BR_ROOT_ID,
IFLA_BR_BRIDGE_ID,
IFLA_BR_ROOT_PORT,
IFLA_BR_ROOT_PATH_COST,
IFLA_BR_TOPOLOGY_CHANGE,
IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
IFLA_BR_HELLO_TIMER,
IFLA_BR_TCN_TIMER,
IFLA_BR_TOPOLOGY_CHANGE_TIMER,
IFLA_BR_GC_TIMER,
IFLA_BR_GROUP_ADDR,
IFLA_BR_FDB_FLUSH,
IFLA_BR_MCAST_ROUTER,
IFLA_BR_MCAST_SNOOPING,
IFLA_BR_MCAST_QUERY_USE_IFADDR,
IFLA_BR_MCAST_QUERIER,
IFLA_BR_MCAST_HASH_ELASTICITY,
IFLA_BR_MCAST_HASH_MAX,
IFLA_BR_MCAST_LAST_MEMBER_CNT,
IFLA_BR_MCAST_STARTUP_QUERY_CNT,
IFLA_BR_MCAST_LAST_MEMBER_INTVL,
IFLA_BR_MCAST_MEMBERSHIP_INTVL,
IFLA_BR_MCAST_QUERIER_INTVL,
IFLA_BR_MCAST_QUERY_INTVL,
IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
IFLA_BR_NF_CALL_IPTABLES,
IFLA_BR_NF_CALL_IP6TABLES,
IFLA_BR_NF_CALL_ARPTABLES,
IFLA_BR_VLAN_DEFAULT_PVID,
IFLA_BR_PAD,
IFLA_BR_VLAN_STATS_ENABLED,
IFLA_BR_MCAST_STATS_ENABLED,
IFLA_BR_MCAST_IGMP_VERSION,
IFLA_BR_MCAST_MLD_VERSION,
IFLA_BR_VLAN_STATS_PER_PORT,
IFLA_BR_MULTI_BOOLOPT,
IFLA_BR_MCAST_QUERIER_STATE,
__IFLA_BR_MAX,
};



struct ifla_bridge_id {
__u8 prio[2];
__u8 addr[6];
};

enum {
BRIDGE_MODE_UNSPEC,
BRIDGE_MODE_HAIRPIN,
};

enum {
IFLA_BRPORT_UNSPEC,
IFLA_BRPORT_STATE,
IFLA_BRPORT_PRIORITY,
IFLA_BRPORT_COST,
IFLA_BRPORT_MODE,
IFLA_BRPORT_GUARD,
IFLA_BRPORT_PROTECT,
IFLA_BRPORT_FAST_LEAVE,
IFLA_BRPORT_LEARNING,
IFLA_BRPORT_UNICAST_FLOOD,
IFLA_BRPORT_PROXYARP,
IFLA_BRPORT_LEARNING_SYNC,
IFLA_BRPORT_PROXYARP_WIFI,
IFLA_BRPORT_ROOT_ID,
IFLA_BRPORT_BRIDGE_ID,
IFLA_BRPORT_DESIGNATED_PORT,
IFLA_BRPORT_DESIGNATED_COST,
IFLA_BRPORT_ID,
IFLA_BRPORT_NO,
IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
IFLA_BRPORT_CONFIG_PENDING,
IFLA_BRPORT_MESSAGE_AGE_TIMER,
IFLA_BRPORT_FORWARD_DELAY_TIMER,
IFLA_BRPORT_HOLD_TIMER,
IFLA_BRPORT_FLUSH,
IFLA_BRPORT_MULTICAST_ROUTER,
IFLA_BRPORT_PAD,
IFLA_BRPORT_MCAST_FLOOD,
IFLA_BRPORT_MCAST_TO_UCAST,
IFLA_BRPORT_VLAN_TUNNEL,
IFLA_BRPORT_BCAST_FLOOD,
IFLA_BRPORT_GROUP_FWD_MASK,
IFLA_BRPORT_NEIGH_SUPPRESS,
IFLA_BRPORT_ISOLATED,
IFLA_BRPORT_BACKUP_PORT,
IFLA_BRPORT_MRP_RING_OPEN,
IFLA_BRPORT_MRP_IN_OPEN,
IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
IFLA_BRPORT_LOCKED,
__IFLA_BRPORT_MAX
};


struct ifla_cacheinfo {
__u32 max_reasm_len;
__u32 tstamp;
__u32 reachable_time;
__u32 retrans_time;
};

enum {
IFLA_INFO_UNSPEC,
IFLA_INFO_KIND,
IFLA_INFO_DATA,
IFLA_INFO_XSTATS,
IFLA_INFO_SLAVE_KIND,
IFLA_INFO_SLAVE_DATA,
__IFLA_INFO_MAX,
};





enum {
IFLA_VLAN_UNSPEC,
IFLA_VLAN_ID,
IFLA_VLAN_FLAGS,
IFLA_VLAN_EGRESS_QOS,
IFLA_VLAN_INGRESS_QOS,
IFLA_VLAN_PROTOCOL,
__IFLA_VLAN_MAX,
};



struct ifla_vlan_flags {
__u32 flags;
__u32 mask;
};

enum {
IFLA_VLAN_QOS_UNSPEC,
IFLA_VLAN_QOS_MAPPING,
__IFLA_VLAN_QOS_MAX
};



struct ifla_vlan_qos_mapping {
__u32 from;
__u32 to;
};


enum {
IFLA_MACVLAN_UNSPEC,
IFLA_MACVLAN_MODE,
IFLA_MACVLAN_FLAGS,
IFLA_MACVLAN_MACADDR_MODE,
IFLA_MACVLAN_MACADDR,
IFLA_MACVLAN_MACADDR_DATA,
IFLA_MACVLAN_MACADDR_COUNT,
IFLA_MACVLAN_BC_QUEUE_LEN,
IFLA_MACVLAN_BC_QUEUE_LEN_USED,
__IFLA_MACVLAN_MAX,
};



enum macvlan_mode {
MACVLAN_MODE_PRIVATE = 1,
MACVLAN_MODE_VEPA = 2,
MACVLAN_MODE_BRIDGE = 4,
MACVLAN_MODE_PASSTHRU = 8,
MACVLAN_MODE_SOURCE = 16,
};

enum macvlan_macaddr_mode {
MACVLAN_MACADDR_ADD,
MACVLAN_MACADDR_DEL,
MACVLAN_MACADDR_FLUSH,
MACVLAN_MACADDR_SET,
};





enum {
IFLA_VRF_UNSPEC,
IFLA_VRF_TABLE,
__IFLA_VRF_MAX
};



enum {
IFLA_VRF_PORT_UNSPEC,
IFLA_VRF_PORT_TABLE,
__IFLA_VRF_PORT_MAX
};




enum {
IFLA_MACSEC_UNSPEC,
IFLA_MACSEC_SCI,
IFLA_MACSEC_PORT,
IFLA_MACSEC_ICV_LEN,
IFLA_MACSEC_CIPHER_SUITE,
IFLA_MACSEC_WINDOW,
IFLA_MACSEC_ENCODING_SA,
IFLA_MACSEC_ENCRYPT,
IFLA_MACSEC_PROTECT,
IFLA_MACSEC_INC_SCI,
IFLA_MACSEC_ES,
IFLA_MACSEC_SCB,
IFLA_MACSEC_REPLAY_PROTECT,
IFLA_MACSEC_VALIDATION,
IFLA_MACSEC_PAD,
IFLA_MACSEC_OFFLOAD,
__IFLA_MACSEC_MAX,
};




enum {
IFLA_XFRM_UNSPEC,
IFLA_XFRM_LINK,
IFLA_XFRM_IF_ID,
__IFLA_XFRM_MAX
};



enum macsec_validation_type {
MACSEC_VALIDATE_DISABLED = 0,
MACSEC_VALIDATE_CHECK = 1,
MACSEC_VALIDATE_STRICT = 2,
__MACSEC_VALIDATE_END,
MACSEC_VALIDATE_MAX = __MACSEC_VALIDATE_END - 1,
};

enum macsec_offload {
MACSEC_OFFLOAD_OFF = 0,
MACSEC_OFFLOAD_PHY = 1,
MACSEC_OFFLOAD_MAC = 2,
__MACSEC_OFFLOAD_END,
MACSEC_OFFLOAD_MAX = __MACSEC_OFFLOAD_END - 1,
};


enum {
IFLA_IPVLAN_UNSPEC,
IFLA_IPVLAN_MODE,
IFLA_IPVLAN_FLAGS,
__IFLA_IPVLAN_MAX
};



enum ipvlan_mode {
IPVLAN_MODE_L2 = 0,
IPVLAN_MODE_L3,
IPVLAN_MODE_L3S,
IPVLAN_MODE_MAX
};





struct tunnel_msg {
__u8 family;
__u8 flags;
__u16 reserved2;
__u32 ifindex;
};
# 747 "./include/uapi/linux/if_link.h"
enum {
VNIFILTER_ENTRY_STATS_UNSPEC,
VNIFILTER_ENTRY_STATS_RX_BYTES,
VNIFILTER_ENTRY_STATS_RX_PKTS,
VNIFILTER_ENTRY_STATS_RX_DROPS,
VNIFILTER_ENTRY_STATS_RX_ERRORS,
VNIFILTER_ENTRY_STATS_TX_BYTES,
VNIFILTER_ENTRY_STATS_TX_PKTS,
VNIFILTER_ENTRY_STATS_TX_DROPS,
VNIFILTER_ENTRY_STATS_TX_ERRORS,
VNIFILTER_ENTRY_STATS_PAD,
__VNIFILTER_ENTRY_STATS_MAX
};


enum {
VXLAN_VNIFILTER_ENTRY_UNSPEC,
VXLAN_VNIFILTER_ENTRY_START,
VXLAN_VNIFILTER_ENTRY_END,
VXLAN_VNIFILTER_ENTRY_GROUP,
VXLAN_VNIFILTER_ENTRY_GROUP6,
VXLAN_VNIFILTER_ENTRY_STATS,
__VXLAN_VNIFILTER_ENTRY_MAX
};


enum {
VXLAN_VNIFILTER_UNSPEC,
VXLAN_VNIFILTER_ENTRY,
__VXLAN_VNIFILTER_MAX
};


enum {
IFLA_VXLAN_UNSPEC,
IFLA_VXLAN_ID,
IFLA_VXLAN_GROUP,
IFLA_VXLAN_LINK,
IFLA_VXLAN_LOCAL,
IFLA_VXLAN_TTL,
IFLA_VXLAN_TOS,
IFLA_VXLAN_LEARNING,
IFLA_VXLAN_AGEING,
IFLA_VXLAN_LIMIT,
IFLA_VXLAN_PORT_RANGE,
IFLA_VXLAN_PROXY,
IFLA_VXLAN_RSC,
IFLA_VXLAN_L2MISS,
IFLA_VXLAN_L3MISS,
IFLA_VXLAN_PORT,
IFLA_VXLAN_GROUP6,
IFLA_VXLAN_LOCAL6,
IFLA_VXLAN_UDP_CSUM,
IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
IFLA_VXLAN_REMCSUM_TX,
IFLA_VXLAN_REMCSUM_RX,
IFLA_VXLAN_GBP,
IFLA_VXLAN_REMCSUM_NOPARTIAL,
IFLA_VXLAN_COLLECT_METADATA,
IFLA_VXLAN_LABEL,
IFLA_VXLAN_GPE,
IFLA_VXLAN_TTL_INHERIT,
IFLA_VXLAN_DF,
IFLA_VXLAN_VNIFILTER,
__IFLA_VXLAN_MAX
};


struct ifla_vxlan_port_range {
__be16 low;
__be16 high;
};

enum ifla_vxlan_df {
VXLAN_DF_UNSET = 0,
VXLAN_DF_SET,
VXLAN_DF_INHERIT,
__VXLAN_DF_END,
VXLAN_DF_MAX = __VXLAN_DF_END - 1,
};


enum {
IFLA_GENEVE_UNSPEC,
IFLA_GENEVE_ID,
IFLA_GENEVE_REMOTE,
IFLA_GENEVE_TTL,
IFLA_GENEVE_TOS,
IFLA_GENEVE_PORT,
IFLA_GENEVE_COLLECT_METADATA,
IFLA_GENEVE_REMOTE6,
IFLA_GENEVE_UDP_CSUM,
IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
IFLA_GENEVE_LABEL,
IFLA_GENEVE_TTL_INHERIT,
IFLA_GENEVE_DF,
IFLA_GENEVE_INNER_PROTO_INHERIT,
__IFLA_GENEVE_MAX
};


enum ifla_geneve_df {
GENEVE_DF_UNSET = 0,
GENEVE_DF_SET,
GENEVE_DF_INHERIT,
__GENEVE_DF_END,
GENEVE_DF_MAX = __GENEVE_DF_END - 1,
};


enum {
IFLA_BAREUDP_UNSPEC,
IFLA_BAREUDP_PORT,
IFLA_BAREUDP_ETHERTYPE,
IFLA_BAREUDP_SRCPORT_MIN,
IFLA_BAREUDP_MULTIPROTO_MODE,
__IFLA_BAREUDP_MAX
};




enum {
IFLA_PPP_UNSPEC,
IFLA_PPP_DEV_FD,
__IFLA_PPP_MAX
};




enum ifla_gtp_role {
GTP_ROLE_GGSN = 0,
GTP_ROLE_SGSN,
};

enum {
IFLA_GTP_UNSPEC,
IFLA_GTP_FD0,
IFLA_GTP_FD1,
IFLA_GTP_PDP_HASHSIZE,
IFLA_GTP_ROLE,
IFLA_GTP_CREATE_SOCKETS,
IFLA_GTP_RESTART_COUNT,
__IFLA_GTP_MAX,
};




enum {
IFLA_BOND_UNSPEC,
IFLA_BOND_MODE,
IFLA_BOND_ACTIVE_SLAVE,
IFLA_BOND_MIIMON,
IFLA_BOND_UPDELAY,
IFLA_BOND_DOWNDELAY,
IFLA_BOND_USE_CARRIER,
IFLA_BOND_ARP_INTERVAL,
IFLA_BOND_ARP_IP_TARGET,
IFLA_BOND_ARP_VALIDATE,
IFLA_BOND_ARP_ALL_TARGETS,
IFLA_BOND_PRIMARY,
IFLA_BOND_PRIMARY_RESELECT,
IFLA_BOND_FAIL_OVER_MAC,
IFLA_BOND_XMIT_HASH_POLICY,
IFLA_BOND_RESEND_IGMP,
IFLA_BOND_NUM_PEER_NOTIF,
IFLA_BOND_ALL_SLAVES_ACTIVE,
IFLA_BOND_MIN_LINKS,
IFLA_BOND_LP_INTERVAL,
IFLA_BOND_PACKETS_PER_SLAVE,
IFLA_BOND_AD_LACP_RATE,
IFLA_BOND_AD_SELECT,
IFLA_BOND_AD_INFO,
IFLA_BOND_AD_ACTOR_SYS_PRIO,
IFLA_BOND_AD_USER_PORT_KEY,
IFLA_BOND_AD_ACTOR_SYSTEM,
IFLA_BOND_TLB_DYNAMIC_LB,
IFLA_BOND_PEER_NOTIF_DELAY,
IFLA_BOND_AD_LACP_ACTIVE,
IFLA_BOND_MISSED_MAX,
IFLA_BOND_NS_IP6_TARGET,
__IFLA_BOND_MAX,
};



enum {
IFLA_BOND_AD_INFO_UNSPEC,
IFLA_BOND_AD_INFO_AGGREGATOR,
IFLA_BOND_AD_INFO_NUM_PORTS,
IFLA_BOND_AD_INFO_ACTOR_KEY,
IFLA_BOND_AD_INFO_PARTNER_KEY,
IFLA_BOND_AD_INFO_PARTNER_MAC,
__IFLA_BOND_AD_INFO_MAX,
};



enum {
IFLA_BOND_SLAVE_UNSPEC,
IFLA_BOND_SLAVE_STATE,
IFLA_BOND_SLAVE_MII_STATUS,
IFLA_BOND_SLAVE_LINK_FAILURE_COUNT,
IFLA_BOND_SLAVE_PERM_HWADDR,
IFLA_BOND_SLAVE_QUEUE_ID,
IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE,
IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE,
__IFLA_BOND_SLAVE_MAX,
};





enum {
IFLA_VF_INFO_UNSPEC,
IFLA_VF_INFO,
__IFLA_VF_INFO_MAX,
};



enum {
IFLA_VF_UNSPEC,
IFLA_VF_MAC,
IFLA_VF_VLAN,
IFLA_VF_TX_RATE,
IFLA_VF_SPOOFCHK,
IFLA_VF_LINK_STATE,
IFLA_VF_RATE,
IFLA_VF_RSS_QUERY_EN,


IFLA_VF_STATS,
IFLA_VF_TRUST,
IFLA_VF_IB_NODE_GUID,
IFLA_VF_IB_PORT_GUID,
IFLA_VF_VLAN_LIST,
IFLA_VF_BROADCAST,
__IFLA_VF_MAX,
};



struct ifla_vf_mac {
__u32 vf;
__u8 mac[32];
};

struct ifla_vf_broadcast {
__u8 broadcast[32];
};

struct ifla_vf_vlan {
__u32 vf;
__u32 vlan;
__u32 qos;
};

enum {
IFLA_VF_VLAN_INFO_UNSPEC,
IFLA_VF_VLAN_INFO,
__IFLA_VF_VLAN_INFO_MAX,
};




struct ifla_vf_vlan_info {
__u32 vf;
__u32 vlan;
__u32 qos;
__be16 vlan_proto;
};

struct ifla_vf_tx_rate {
__u32 vf;
__u32 rate;
};

struct ifla_vf_rate {
__u32 vf;
__u32 min_tx_rate;
__u32 max_tx_rate;
};

struct ifla_vf_spoofchk {
__u32 vf;
__u32 setting;
};

struct ifla_vf_guid {
__u32 vf;
__u64 guid;
};

enum {
IFLA_VF_LINK_STATE_AUTO,
IFLA_VF_LINK_STATE_ENABLE,
IFLA_VF_LINK_STATE_DISABLE,
__IFLA_VF_LINK_STATE_MAX,
};

struct ifla_vf_link_state {
__u32 vf;
__u32 link_state;
};

struct ifla_vf_rss_query_en {
__u32 vf;
__u32 setting;
};

enum {
IFLA_VF_STATS_RX_PACKETS,
IFLA_VF_STATS_TX_PACKETS,
IFLA_VF_STATS_RX_BYTES,
IFLA_VF_STATS_TX_BYTES,
IFLA_VF_STATS_BROADCAST,
IFLA_VF_STATS_MULTICAST,
IFLA_VF_STATS_PAD,
IFLA_VF_STATS_RX_DROPPED,
IFLA_VF_STATS_TX_DROPPED,
__IFLA_VF_STATS_MAX,
};



struct ifla_vf_trust {
__u32 vf;
__u32 setting;
};
# 1100 "./include/uapi/linux/if_link.h"
enum {
IFLA_VF_PORT_UNSPEC,
IFLA_VF_PORT,
__IFLA_VF_PORT_MAX,
};



enum {
IFLA_PORT_UNSPEC,
IFLA_PORT_VF,
IFLA_PORT_PROFILE,
IFLA_PORT_VSI_TYPE,
IFLA_PORT_INSTANCE_UUID,
IFLA_PORT_HOST_UUID,
IFLA_PORT_REQUEST,
IFLA_PORT_RESPONSE,
__IFLA_PORT_MAX,
};







enum {
PORT_REQUEST_PREASSOCIATE = 0,
PORT_REQUEST_PREASSOCIATE_RR,
PORT_REQUEST_ASSOCIATE,
PORT_REQUEST_DISASSOCIATE,
};
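/*
 * The misspelled PORT_VDP_RESPONSE_VTID_VERSION_VIOALTION in the next enum
 * matches the upstream UAPI header; the identifier is exported API and is
 * kept verbatim.
 */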

enum {
PORT_VDP_RESPONSE_SUCCESS = 0,
PORT_VDP_RESPONSE_INVALID_FORMAT,
PORT_VDP_RESPONSE_INSUFFICIENT_RESOURCES,
PORT_VDP_RESPONSE_UNUSED_VTID,
PORT_VDP_RESPONSE_VTID_VIOLATION,
PORT_VDP_RESPONSE_VTID_VERSION_VIOALTION,
PORT_VDP_RESPONSE_OUT_OF_SYNC,

PORT_PROFILE_RESPONSE_SUCCESS = 0x100,
PORT_PROFILE_RESPONSE_INPROGRESS,
PORT_PROFILE_RESPONSE_INVALID,
PORT_PROFILE_RESPONSE_BADSTATE,
PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES,
PORT_PROFILE_RESPONSE_ERROR,
};

struct ifla_port_vsi {
__u8 vsi_mgr_id;
__u8 vsi_type_id[3];
__u8 vsi_type_version;
__u8 pad[3];
};




enum {
IFLA_IPOIB_UNSPEC,
IFLA_IPOIB_PKEY,
IFLA_IPOIB_MODE,
IFLA_IPOIB_UMCAST,
__IFLA_IPOIB_MAX
};

enum {
IPOIB_MODE_DATAGRAM = 0,
IPOIB_MODE_CONNECTED = 1,
};







enum {
HSR_PROTOCOL_HSR,
HSR_PROTOCOL_PRP,
HSR_PROTOCOL_MAX,
};

enum {
IFLA_HSR_UNSPEC,
IFLA_HSR_SLAVE1,
IFLA_HSR_SLAVE2,
IFLA_HSR_MULTICAST_SPEC,
IFLA_HSR_SUPERVISION_ADDR,
IFLA_HSR_SEQ_NR,
IFLA_HSR_VERSION,
IFLA_HSR_PROTOCOL,


__IFLA_HSR_MAX,
};





struct if_stats_msg {
__u8 family;
__u8 pad1;
__u16 pad2;
__u32 ifindex;
__u32 filter_mask;
};




enum {
IFLA_STATS_UNSPEC,
IFLA_STATS_LINK_64,
IFLA_STATS_LINK_XSTATS,
IFLA_STATS_LINK_XSTATS_SLAVE,
IFLA_STATS_LINK_OFFLOAD_XSTATS,
IFLA_STATS_AF_SPEC,
__IFLA_STATS_MAX,
};





enum {
IFLA_STATS_GETSET_UNSPEC,
IFLA_STATS_GET_FILTERS,


IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS,
__IFLA_STATS_GETSET_MAX,
};
# 1244 "./include/uapi/linux/if_link.h"
enum {
LINK_XSTATS_TYPE_UNSPEC,
LINK_XSTATS_TYPE_BRIDGE,
LINK_XSTATS_TYPE_BOND,
__LINK_XSTATS_TYPE_MAX
};



enum {
IFLA_OFFLOAD_XSTATS_UNSPEC,
IFLA_OFFLOAD_XSTATS_CPU_HIT,
IFLA_OFFLOAD_XSTATS_HW_S_INFO,
IFLA_OFFLOAD_XSTATS_L3_STATS,
__IFLA_OFFLOAD_XSTATS_MAX
};


enum {
IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC,
IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST,
IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED,
__IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX,
};
# 1285 "./include/uapi/linux/if_link.h"
enum {
XDP_ATTACHED_NONE = 0,
XDP_ATTACHED_DRV,
XDP_ATTACHED_SKB,
XDP_ATTACHED_HW,
XDP_ATTACHED_MULTI,
};

enum {
IFLA_XDP_UNSPEC,
IFLA_XDP_FD,
IFLA_XDP_ATTACHED,
IFLA_XDP_FLAGS,
IFLA_XDP_PROG_ID,
IFLA_XDP_DRV_PROG_ID,
IFLA_XDP_SKB_PROG_ID,
IFLA_XDP_HW_PROG_ID,
IFLA_XDP_EXPECTED_FD,
__IFLA_XDP_MAX,
};



enum {
IFLA_EVENT_NONE,
IFLA_EVENT_REBOOT,
IFLA_EVENT_FEATURES,
IFLA_EVENT_BONDING_FAILOVER,
IFLA_EVENT_NOTIFY_PEERS,
IFLA_EVENT_IGMP_RESEND,
IFLA_EVENT_BONDING_OPTIONS,
};



enum {
IFLA_TUN_UNSPEC,
IFLA_TUN_OWNER,
IFLA_TUN_GROUP,
IFLA_TUN_TYPE,
IFLA_TUN_PI,
IFLA_TUN_VNET_HDR,
IFLA_TUN_PERSIST,
IFLA_TUN_MULTI_QUEUE,
IFLA_TUN_NUM_QUEUES,
IFLA_TUN_NUM_DISABLED_QUEUES,
__IFLA_TUN_MAX,
};
# 1345 "./include/uapi/linux/if_link.h"
enum {
IFLA_RMNET_UNSPEC,
IFLA_RMNET_MUX_ID,
IFLA_RMNET_FLAGS,
__IFLA_RMNET_MAX,
};



struct ifla_rmnet_flags {
__u32 flags;
__u32 mask;
};



enum {
IFLA_MCTP_UNSPEC,
IFLA_MCTP_NET,
__IFLA_MCTP_MAX,
};
# 6 "./include/linux/if_link.h" 2



struct ifla_vf_stats {
__u64 rx_packets;
__u64 tx_packets;
__u64 rx_bytes;
__u64 tx_bytes;
__u64 broadcast;
__u64 multicast;
__u64 rx_dropped;
__u64 tx_dropped;
};

struct ifla_vf_info {
__u32 vf;
__u8 mac[32];
__u32 vlan;
__u32 qos;
__u32 spoofchk;
__u32 linkstate;
__u32 min_tx_rate;
__u32 max_tx_rate;
__u32 rss_query_en;
__u32 trusted;
__be16 vlan_proto;
};
# 33 "./include/uapi/linux/netdevice.h" 2
# 49 "./include/uapi/linux/netdevice.h"
enum {
IF_PORT_UNKNOWN = 0,
IF_PORT_10BASE2,
IF_PORT_10BASET,
IF_PORT_AUI,
IF_PORT_100BASET,
IF_PORT_100BASETX,
IF_PORT_100BASEFX
};
# 48 "./include/linux/netdevice.h" 2
# 1 "./include/uapi/linux/if_bonding.h" 1
# 109 "./include/uapi/linux/if_bonding.h"
typedef struct ifbond {
__s32 bond_mode;
__s32 num_slaves;
__s32 miimon;
} ifbond;

typedef struct ifslave {
__s32 slave_id;
char slave_name[16];
__s8 link;
__s8 state;
__u32 link_failure_count;
} ifslave;
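/*
 * ifbond/ifslave above are the legacy bonding ioctl structures; the 16
 * in slave_name[16] is the expanded IFNAMSIZ.
 */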

struct ad_info {
__u16 aggregator_id;
__u16 ports;
__u16 actor_key;
__u16 partner_key;
__u8 partner_system[6];
};


enum {
BOND_XSTATS_UNSPEC,
BOND_XSTATS_3AD,
__BOND_XSTATS_MAX
};



enum {
BOND_3AD_STAT_LACPDU_RX,
BOND_3AD_STAT_LACPDU_TX,
BOND_3AD_STAT_LACPDU_UNKNOWN_RX,
BOND_3AD_STAT_LACPDU_ILLEGAL_RX,
BOND_3AD_STAT_MARKER_RX,
BOND_3AD_STAT_MARKER_TX,
BOND_3AD_STAT_MARKER_RESP_RX,
BOND_3AD_STAT_MARKER_RESP_TX,
BOND_3AD_STAT_MARKER_UNKNOWN_RX,
BOND_3AD_STAT_PAD,
__BOND_3AD_STAT_MAX
};
# 49 "./include/linux/netdevice.h" 2
# 1 "./include/uapi/linux/pkt_cls.h" 1





# 1 "./include/uapi/linux/pkt_sched.h" 1
# 34 "./include/uapi/linux/pkt_sched.h"
struct tc_stats {
__u64 bytes;
__u32 packets;
__u32 drops;
__u32 overlimits;

__u32 bps;
__u32 pps;
__u32 qlen;
__u32 backlog;
};

struct tc_estimator {
signed char interval;
unsigned char ewma_log;
};
# 84 "./include/uapi/linux/pkt_sched.h"
enum tc_link_layer {
TC_LINKLAYER_UNAWARE,
TC_LINKLAYER_ETHERNET,
TC_LINKLAYER_ATM,
};


struct tc_ratespec {
unsigned char cell_log;
__u8 linklayer;
unsigned short overhead;
short cell_align;
unsigned short mpu;
__u32 rate;
};



struct tc_sizespec {
unsigned char cell_log;
unsigned char size_log;
short cell_align;
int overhead;
unsigned int linklayer;
unsigned int mpu;
unsigned int mtu;
unsigned int tsize;
};

enum {
TCA_STAB_UNSPEC,
TCA_STAB_BASE,
TCA_STAB_DATA,
__TCA_STAB_MAX
};





struct tc_fifo_qopt {
__u32 limit;
};
# 139 "./include/uapi/linux/pkt_sched.h"
struct tc_skbprio_qopt {
__u32 limit;
};






struct tc_prio_qopt {
int bands;
__u8 priomap[15 + 1];
};
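/* "15 + 1" above is the expanded TC_PRIO_MAX + 1, i.e. 16 priomap slots. */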



struct tc_multiq_qopt {
__u16 bands;
__u16 max_bands;
};
# 167 "./include/uapi/linux/pkt_sched.h"
struct tc_plug_qopt {
# 177 "./include/uapi/linux/pkt_sched.h"
int action;
__u32 limit;
};



struct tc_tbf_qopt {
struct tc_ratespec rate;
struct tc_ratespec peakrate;
__u32 limit;
__u32 buffer;
__u32 mtu;
};

enum {
TCA_TBF_UNSPEC,
TCA_TBF_PARMS,
TCA_TBF_RTAB,
TCA_TBF_PTAB,
TCA_TBF_RATE64,
TCA_TBF_PRATE64,
TCA_TBF_BURST,
TCA_TBF_PBURST,
TCA_TBF_PAD,
__TCA_TBF_MAX,
};
# 213 "./include/uapi/linux/pkt_sched.h"
struct tc_sfq_qopt {
unsigned quantum;
int perturb_period;
__u32 limit;
unsigned divisor;
unsigned flows;
};

struct tc_sfqred_stats {
__u32 prob_drop;
__u32 forced_drop;
__u32 prob_mark;
__u32 forced_mark;
__u32 prob_mark_head;
__u32 forced_mark_head;
};

struct tc_sfq_qopt_v1 {
struct tc_sfq_qopt v0;
unsigned int depth;
unsigned int headdrop;

__u32 limit;
__u32 qth_min;
__u32 qth_max;
unsigned char Wlog;
unsigned char Plog;
unsigned char Scell_log;
unsigned char flags;
__u32 max_P;

struct tc_sfqred_stats stats;
};


struct tc_sfq_xstats {
__s32 allot;
};



enum {
TCA_RED_UNSPEC,
TCA_RED_PARMS,
TCA_RED_STAB,
TCA_RED_MAX_P,
TCA_RED_FLAGS,
TCA_RED_EARLY_DROP_BLOCK,
TCA_RED_MARK_BLOCK,
__TCA_RED_MAX,
};



struct tc_red_qopt {
__u32 limit;
__u32 qth_min;
__u32 qth_max;
unsigned char Wlog;
unsigned char Plog;
unsigned char Scell_log;
# 287 "./include/uapi/linux/pkt_sched.h"
unsigned char flags;




};



struct tc_red_xstats {
__u32 early;
__u32 pdrop;
__u32 other;
__u32 marked;
};





enum {
TCA_GRED_UNSPEC,
TCA_GRED_PARMS,
TCA_GRED_STAB,
TCA_GRED_DPS,
TCA_GRED_MAX_P,
TCA_GRED_LIMIT,
TCA_GRED_VQ_LIST,
__TCA_GRED_MAX,
};



enum {
TCA_GRED_VQ_ENTRY_UNSPEC,
TCA_GRED_VQ_ENTRY,
__TCA_GRED_VQ_ENTRY_MAX,
};


enum {
TCA_GRED_VQ_UNSPEC,
TCA_GRED_VQ_PAD,
TCA_GRED_VQ_DP,
TCA_GRED_VQ_STAT_BYTES,
TCA_GRED_VQ_STAT_PACKETS,
TCA_GRED_VQ_STAT_BACKLOG,
TCA_GRED_VQ_STAT_PROB_DROP,
TCA_GRED_VQ_STAT_PROB_MARK,
TCA_GRED_VQ_STAT_FORCED_DROP,
TCA_GRED_VQ_STAT_FORCED_MARK,
TCA_GRED_VQ_STAT_PDROP,
TCA_GRED_VQ_STAT_OTHER,
TCA_GRED_VQ_FLAGS,
__TCA_GRED_VQ_MAX
};



struct tc_gred_qopt {
__u32 limit;
__u32 qth_min;
__u32 qth_max;
__u32 DP;
__u32 backlog;
__u32 qave;
__u32 forced;
__u32 early;
__u32 other;
__u32 pdrop;
__u8 Wlog;
__u8 Plog;
__u8 Scell_log;
__u8 prio;
__u32 packets;
__u32 bytesin;
};


struct tc_gred_sopt {
__u32 DPs;
__u32 def_DP;
__u8 grio;
__u8 flags;
__u16 pad1;
};



enum {
TCA_CHOKE_UNSPEC,
TCA_CHOKE_PARMS,
TCA_CHOKE_STAB,
TCA_CHOKE_MAX_P,
__TCA_CHOKE_MAX,
};



struct tc_choke_qopt {
__u32 limit;
__u32 qth_min;
__u32 qth_max;
unsigned char Wlog;
unsigned char Plog;
unsigned char Scell_log;
unsigned char flags;
};

struct tc_choke_xstats {
__u32 early;
__u32 pdrop;
__u32 other;
__u32 marked;
__u32 matched;
};






struct tc_htb_opt {
struct tc_ratespec rate;
struct tc_ratespec ceil;
__u32 buffer;
__u32 cbuffer;
__u32 quantum;
__u32 level;
__u32 prio;
};
struct tc_htb_glob {
__u32 version;
__u32 rate2quantum;
__u32 defcls;
__u32 debug;


__u32 direct_pkts;
};
enum {
TCA_HTB_UNSPEC,
TCA_HTB_PARMS,
TCA_HTB_INIT,
TCA_HTB_CTAB,
TCA_HTB_RTAB,
TCA_HTB_DIRECT_QLEN,
TCA_HTB_RATE64,
TCA_HTB_CEIL64,
TCA_HTB_PAD,
TCA_HTB_OFFLOAD,
__TCA_HTB_MAX,
};



struct tc_htb_xstats {
__u32 lends;
__u32 borrows;
__u32 giants;
__s32 tokens;
__s32 ctokens;
};



struct tc_hfsc_qopt {
__u16 defcls;
};

struct tc_service_curve {
__u32 m1;
__u32 d;
__u32 m2;
};

struct tc_hfsc_stats {
__u64 work;
__u64 rtwork;
__u32 period;
__u32 level;
};

enum {
TCA_HFSC_UNSPEC,
TCA_HFSC_RSC,
TCA_HFSC_FSC,
TCA_HFSC_USC,
__TCA_HFSC_MAX,
};
# 487 "./include/uapi/linux/pkt_sched.h"
struct tc_cbq_lssopt {
unsigned char change;
unsigned char flags;


unsigned char ewma_log;
unsigned char level;






__u32 maxidle;
__u32 minidle;
__u32 offtime;
__u32 avpkt;
};

struct tc_cbq_wrropt {
unsigned char flags;
unsigned char priority;
unsigned char cpriority;
unsigned char __reserved;
__u32 allot;
__u32 weight;
};

struct tc_cbq_ovl {
unsigned char strategy;





unsigned char priority2;
__u16 pad;
__u32 penalty;
};

struct tc_cbq_police {
unsigned char police;
unsigned char __res1;
unsigned short __res2;
};

struct tc_cbq_fopt {
__u32 split;
__u32 defmap;
__u32 defchange;
};

struct tc_cbq_xstats {
__u32 borrows;
__u32 overactions;
__s32 avgidle;
__s32 undertime;
};

enum {
TCA_CBQ_UNSPEC,
TCA_CBQ_LSSOPT,
TCA_CBQ_WRROPT,
TCA_CBQ_FOPT,
TCA_CBQ_OVL_STRATEGY,
TCA_CBQ_RATE,
TCA_CBQ_RTAB,
TCA_CBQ_POLICE,
__TCA_CBQ_MAX,
};





enum {
TCA_DSMARK_UNSPEC,
TCA_DSMARK_INDICES,
TCA_DSMARK_DEFAULT_INDEX,
TCA_DSMARK_SET_TC_INDEX,
TCA_DSMARK_MASK,
TCA_DSMARK_VALUE,
__TCA_DSMARK_MAX,
};





enum {
TCA_ATM_UNSPEC,
TCA_ATM_FD,
TCA_ATM_PTR,
TCA_ATM_HDR,
TCA_ATM_EXCESS,
TCA_ATM_ADDR,
TCA_ATM_STATE,
__TCA_ATM_MAX,
};





enum {
TCA_NETEM_UNSPEC,
TCA_NETEM_CORR,
TCA_NETEM_DELAY_DIST,
TCA_NETEM_REORDER,
TCA_NETEM_CORRUPT,
TCA_NETEM_LOSS,
TCA_NETEM_RATE,
TCA_NETEM_ECN,
TCA_NETEM_RATE64,
TCA_NETEM_PAD,
TCA_NETEM_LATENCY64,
TCA_NETEM_JITTER64,
TCA_NETEM_SLOT,
TCA_NETEM_SLOT_DIST,
__TCA_NETEM_MAX,
};



struct tc_netem_qopt {
__u32 latency;
__u32 limit;
__u32 loss;
__u32 gap;
__u32 duplicate;
__u32 jitter;
};

struct tc_netem_corr {
__u32 delay_corr;
__u32 loss_corr;
__u32 dup_corr;
};

struct tc_netem_reorder {
__u32 probability;
__u32 correlation;
};

struct tc_netem_corrupt {
__u32 probability;
__u32 correlation;
};

struct tc_netem_rate {
__u32 rate;
__s32 packet_overhead;
__u32 cell_size;
__s32 cell_overhead;
};

struct tc_netem_slot {
__s64 min_delay;
__s64 max_delay;
__s32 max_packets;
__s32 max_bytes;
__s64 dist_delay;
__s64 dist_jitter;
};

enum {
NETEM_LOSS_UNSPEC,
NETEM_LOSS_GI,
NETEM_LOSS_GE,
__NETEM_LOSS_MAX
};



struct tc_netem_gimodel {
__u32 p13;
__u32 p31;
__u32 p32;
__u32 p14;
__u32 p23;
};


struct tc_netem_gemodel {
__u32 p;
__u32 r;
__u32 h;
__u32 k1;
};






enum {
TCA_DRR_UNSPEC,
TCA_DRR_QUANTUM,
__TCA_DRR_MAX
};



struct tc_drr_stats {
__u32 deficit;
};





enum {
TC_MQPRIO_HW_OFFLOAD_NONE,
TC_MQPRIO_HW_OFFLOAD_TCS,
__TC_MQPRIO_HW_OFFLOAD_MAX
};



enum {
TC_MQPRIO_MODE_DCB,
TC_MQPRIO_MODE_CHANNEL,
__TC_MQPRIO_MODE_MAX
};



enum {
TC_MQPRIO_SHAPER_DCB,
TC_MQPRIO_SHAPER_BW_RATE,
__TC_MQPRIO_SHAPER_MAX
};



struct tc_mqprio_qopt {
__u8 num_tc;
__u8 prio_tc_map[15 + 1];
__u8 hw;
__u16 count[16];
__u16 offset[16];
};






enum {
TCA_MQPRIO_UNSPEC,
TCA_MQPRIO_MODE,
TCA_MQPRIO_SHAPER,
TCA_MQPRIO_MIN_RATE64,
TCA_MQPRIO_MAX_RATE64,
__TCA_MQPRIO_MAX,
};





enum {
TCA_SFB_UNSPEC,
TCA_SFB_PARMS,
__TCA_SFB_MAX,
};






struct tc_sfb_qopt {
__u32 rehash_interval;
__u32 warmup_time;
__u32 max;
__u32 bin_size;
__u32 increment;
__u32 decrement;
__u32 limit;
__u32 penalty_rate;
__u32 penalty_burst;
};

struct tc_sfb_xstats {
__u32 earlydrop;
__u32 penaltydrop;
__u32 bucketdrop;
__u32 queuedrop;
__u32 childdrop;
__u32 marked;
__u32 maxqlen;
__u32 maxprob;
__u32 avgprob;
};




enum {
TCA_QFQ_UNSPEC,
TCA_QFQ_WEIGHT,
TCA_QFQ_LMAX,
__TCA_QFQ_MAX
};



struct tc_qfq_stats {
__u32 weight;
__u32 lmax;
};



enum {
TCA_CODEL_UNSPEC,
TCA_CODEL_TARGET,
TCA_CODEL_LIMIT,
TCA_CODEL_INTERVAL,
TCA_CODEL_ECN,
TCA_CODEL_CE_THRESHOLD,
__TCA_CODEL_MAX
};



struct tc_codel_xstats {
__u32 maxpacket;
__u32 count;


__u32 lastcount;
__u32 ldelay;
__s32 drop_next;
__u32 drop_overlimit;
__u32 ecn_mark;
__u32 dropping;
__u32 ce_mark;
};





enum {
TCA_FQ_CODEL_UNSPEC,
TCA_FQ_CODEL_TARGET,
TCA_FQ_CODEL_LIMIT,
TCA_FQ_CODEL_INTERVAL,
TCA_FQ_CODEL_ECN,
TCA_FQ_CODEL_FLOWS,
TCA_FQ_CODEL_QUANTUM,
TCA_FQ_CODEL_CE_THRESHOLD,
TCA_FQ_CODEL_DROP_BATCH_SIZE,
TCA_FQ_CODEL_MEMORY_LIMIT,
TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR,
TCA_FQ_CODEL_CE_THRESHOLD_MASK,
__TCA_FQ_CODEL_MAX
};



enum {
TCA_FQ_CODEL_XSTATS_QDISC,
TCA_FQ_CODEL_XSTATS_CLASS,
};

struct tc_fq_codel_qd_stats {
__u32 maxpacket;
__u32 drop_overlimit;


__u32 ecn_mark;


__u32 new_flow_count;


__u32 new_flows_len;
__u32 old_flows_len;
__u32 ce_mark;
__u32 memory_usage;
__u32 drop_overmemory;
};

struct tc_fq_codel_cl_stats {
__s32 deficit;
__u32 ldelay;


__u32 count;
__u32 lastcount;
__u32 dropping;
__s32 drop_next;
};

struct tc_fq_codel_xstats {
__u32 type;
union {
struct tc_fq_codel_qd_stats qdisc_stats;
struct tc_fq_codel_cl_stats class_stats;
};
};



enum {
TCA_FQ_UNSPEC,

TCA_FQ_PLIMIT,

TCA_FQ_FLOW_PLIMIT,

TCA_FQ_QUANTUM,

TCA_FQ_INITIAL_QUANTUM,

TCA_FQ_RATE_ENABLE,

TCA_FQ_FLOW_DEFAULT_RATE,

TCA_FQ_FLOW_MAX_RATE,

TCA_FQ_BUCKETS_LOG,

TCA_FQ_FLOW_REFILL_DELAY,

TCA_FQ_ORPHAN_MASK,

TCA_FQ_LOW_RATE_THRESHOLD,

TCA_FQ_CE_THRESHOLD,

TCA_FQ_TIMER_SLACK,

TCA_FQ_HORIZON,

TCA_FQ_HORIZON_DROP,

__TCA_FQ_MAX
};



struct tc_fq_qd_stats {
__u64 gc_flows;
__u64 highprio_packets;
__u64 tcp_retrans;
__u64 throttled;
__u64 flows_plimit;
__u64 pkts_too_long;
__u64 allocation_errors;
__s64 time_next_delayed_flow;
__u32 flows;
__u32 inactive_flows;
__u32 throttled_flows;
__u32 unthrottle_latency_ns;
__u64 ce_mark;
__u64 horizon_drops;
__u64 horizon_caps;
};



enum {
TCA_HHF_UNSPEC,
TCA_HHF_BACKLOG_LIMIT,
TCA_HHF_QUANTUM,
TCA_HHF_HH_FLOWS_LIMIT,
TCA_HHF_RESET_TIMEOUT,
TCA_HHF_ADMIT_BYTES,
TCA_HHF_EVICT_TIMEOUT,
TCA_HHF_NON_HH_WEIGHT,
__TCA_HHF_MAX
};



struct tc_hhf_xstats {
__u32 drop_overlimit;


__u32 hh_overlimit;
__u32 hh_tot_count;
__u32 hh_cur_count;
};


enum {
TCA_PIE_UNSPEC,
TCA_PIE_TARGET,
TCA_PIE_LIMIT,
TCA_PIE_TUPDATE,
TCA_PIE_ALPHA,
TCA_PIE_BETA,
TCA_PIE_ECN,
TCA_PIE_BYTEMODE,
TCA_PIE_DQ_RATE_ESTIMATOR,
__TCA_PIE_MAX
};


struct tc_pie_xstats {
__u64 prob;
__u32 delay;
__u32 avg_dq_rate;


__u32 dq_rate_estimating;
__u32 packets_in;
__u32 dropped;
__u32 overlimit;


__u32 maxq;
__u32 ecn_mark;
};


enum {
TCA_FQ_PIE_UNSPEC,
TCA_FQ_PIE_LIMIT,
TCA_FQ_PIE_FLOWS,
TCA_FQ_PIE_TARGET,
TCA_FQ_PIE_TUPDATE,
TCA_FQ_PIE_ALPHA,
TCA_FQ_PIE_BETA,
TCA_FQ_PIE_QUANTUM,
TCA_FQ_PIE_MEMORY_LIMIT,
TCA_FQ_PIE_ECN_PROB,
TCA_FQ_PIE_ECN,
TCA_FQ_PIE_BYTEMODE,
TCA_FQ_PIE_DQ_RATE_ESTIMATOR,
__TCA_FQ_PIE_MAX
};


struct tc_fq_pie_xstats {
__u32 packets_in;
__u32 dropped;
__u32 overlimit;
__u32 overmemory;
__u32 ecn_mark;
__u32 new_flow_count;
__u32 new_flows_len;
__u32 old_flows_len;
__u32 memory_usage;
};


struct tc_cbs_qopt {
__u8 offload;
__u8 _pad[3];
__s32 hicredit;
__s32 locredit;
__s32 idleslope;
__s32 sendslope;
};

enum {
TCA_CBS_UNSPEC,
TCA_CBS_PARMS,
__TCA_CBS_MAX,
};





struct tc_etf_qopt {
__s32 delta;
__s32 clockid;
__u32 flags;



};

enum {
TCA_ETF_UNSPEC,
TCA_ETF_PARMS,
__TCA_ETF_MAX,
};





enum {
TCA_CAKE_UNSPEC,
TCA_CAKE_PAD,
TCA_CAKE_BASE_RATE64,
TCA_CAKE_DIFFSERV_MODE,
TCA_CAKE_ATM,
TCA_CAKE_FLOW_MODE,
TCA_CAKE_OVERHEAD,
TCA_CAKE_RTT,
TCA_CAKE_TARGET,
TCA_CAKE_AUTORATE,
TCA_CAKE_MEMORY,
TCA_CAKE_NAT,
TCA_CAKE_RAW,
TCA_CAKE_WASH,
TCA_CAKE_MPU,
TCA_CAKE_INGRESS,
TCA_CAKE_ACK_FILTER,
TCA_CAKE_SPLIT_GSO,
TCA_CAKE_FWMARK,
__TCA_CAKE_MAX
};


enum {
__TCA_CAKE_STATS_INVALID,
TCA_CAKE_STATS_PAD,
TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
TCA_CAKE_STATS_MEMORY_LIMIT,
TCA_CAKE_STATS_MEMORY_USED,
TCA_CAKE_STATS_AVG_NETOFF,
TCA_CAKE_STATS_MIN_NETLEN,
TCA_CAKE_STATS_MAX_NETLEN,
TCA_CAKE_STATS_MIN_ADJLEN,
TCA_CAKE_STATS_MAX_ADJLEN,
TCA_CAKE_STATS_TIN_STATS,
TCA_CAKE_STATS_DEFICIT,
TCA_CAKE_STATS_COBALT_COUNT,
TCA_CAKE_STATS_DROPPING,
TCA_CAKE_STATS_DROP_NEXT_US,
TCA_CAKE_STATS_P_DROP,
TCA_CAKE_STATS_BLUE_TIMER_US,
__TCA_CAKE_STATS_MAX
};


enum {
__TCA_CAKE_TIN_STATS_INVALID,
TCA_CAKE_TIN_STATS_PAD,
TCA_CAKE_TIN_STATS_SENT_PACKETS,
TCA_CAKE_TIN_STATS_SENT_BYTES64,
TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
TCA_CAKE_TIN_STATS_TARGET_US,
TCA_CAKE_TIN_STATS_INTERVAL_US,
TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
TCA_CAKE_TIN_STATS_WAY_MISSES,
TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
TCA_CAKE_TIN_STATS_AVG_DELAY_US,
TCA_CAKE_TIN_STATS_BASE_DELAY_US,
TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
TCA_CAKE_TIN_STATS_BULK_FLOWS,
TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
TCA_CAKE_TIN_STATS_MAX_SKBLEN,
TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
__TCA_CAKE_TIN_STATS_MAX
};



enum {
CAKE_FLOW_NONE = 0,
CAKE_FLOW_SRC_IP,
CAKE_FLOW_DST_IP,
CAKE_FLOW_HOSTS,
CAKE_FLOW_FLOWS,
CAKE_FLOW_DUAL_SRC,
CAKE_FLOW_DUAL_DST,
CAKE_FLOW_TRIPLE,
CAKE_FLOW_MAX,
};

enum {
CAKE_DIFFSERV_DIFFSERV3 = 0,
CAKE_DIFFSERV_DIFFSERV4,
CAKE_DIFFSERV_DIFFSERV8,
CAKE_DIFFSERV_BESTEFFORT,
CAKE_DIFFSERV_PRECEDENCE,
CAKE_DIFFSERV_MAX
};

enum {
CAKE_ACK_NONE = 0,
CAKE_ACK_FILTER,
CAKE_ACK_AGGRESSIVE,
CAKE_ACK_MAX
};

enum {
CAKE_ATM_NONE = 0,
CAKE_ATM_ATM,
CAKE_ATM_PTM,
CAKE_ATM_MAX
};



enum {
TC_TAPRIO_CMD_SET_GATES = 0x00,
TC_TAPRIO_CMD_SET_AND_HOLD = 0x01,
TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02,
};

enum {
TCA_TAPRIO_SCHED_ENTRY_UNSPEC,
TCA_TAPRIO_SCHED_ENTRY_INDEX,
TCA_TAPRIO_SCHED_ENTRY_CMD,
TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
__TCA_TAPRIO_SCHED_ENTRY_MAX,
};
# 1214 "./include/uapi/linux/pkt_sched.h"
enum {
TCA_TAPRIO_SCHED_UNSPEC,
TCA_TAPRIO_SCHED_ENTRY,
__TCA_TAPRIO_SCHED_MAX,
};
# 1235 "./include/uapi/linux/pkt_sched.h"
enum {
TCA_TAPRIO_ATTR_UNSPEC,
TCA_TAPRIO_ATTR_PRIOMAP,
TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST,
TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY,
TCA_TAPRIO_ATTR_SCHED_CLOCKID,
TCA_TAPRIO_PAD,
TCA_TAPRIO_ATTR_ADMIN_SCHED,
TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
TCA_TAPRIO_ATTR_FLAGS,
TCA_TAPRIO_ATTR_TXTIME_DELAY,
__TCA_TAPRIO_ATTR_MAX,
};







enum {
TCA_ETS_UNSPEC,
TCA_ETS_NBANDS,
TCA_ETS_NSTRICT,
TCA_ETS_QUANTA,
TCA_ETS_QUANTA_BAND,
TCA_ETS_PRIOMAP,
TCA_ETS_PRIOMAP_BAND,
__TCA_ETS_MAX,
};
# 7 "./include/uapi/linux/pkt_cls.h" 2




enum {
TCA_ACT_UNSPEC,
TCA_ACT_KIND,
TCA_ACT_OPTIONS,
TCA_ACT_INDEX,
TCA_ACT_STATS,
TCA_ACT_PAD,
TCA_ACT_COOKIE,
TCA_ACT_FLAGS,
TCA_ACT_HW_STATS,
TCA_ACT_USED_HW_STATS,
TCA_ACT_IN_HW_COUNT,
__TCA_ACT_MAX
};
# 119 "./include/uapi/linux/pkt_cls.h"
enum tca_id {
TCA_ID_UNSPEC = 0,
TCA_ID_POLICE = 1,
TCA_ID_GACT = 5,
TCA_ID_IPT = 6,
TCA_ID_PEDIT = 7,
TCA_ID_MIRRED = 8,
TCA_ID_NAT = 9,
TCA_ID_XT = 10,
TCA_ID_SKBEDIT = 11,
TCA_ID_VLAN = 12,
TCA_ID_BPF = 13,
TCA_ID_CONNMARK = 14,
TCA_ID_SKBMOD = 15,
TCA_ID_CSUM = 16,
TCA_ID_TUNNEL_KEY = 17,
TCA_ID_SIMP = 22,
TCA_ID_IFE = 25,
TCA_ID_SAMPLE = 26,
TCA_ID_CTINFO,
TCA_ID_MPLS,
TCA_ID_CT,
TCA_ID_GATE,

__TCA_ID_MAX = 255
};



struct tc_police {
__u32 index;
int action;






__u32 limit;
__u32 burst;
__u32 mtu;
struct tc_ratespec rate;
struct tc_ratespec peakrate;
int refcnt;
int bindcnt;
__u32 capab;
};

struct tcf_t {
__u64 install;
__u64 lastuse;
__u64 expires;
__u64 firstuse;
};

struct tc_cnt {
int refcnt;
int bindcnt;
};
# 186 "./include/uapi/linux/pkt_cls.h"
enum {
TCA_POLICE_UNSPEC,
TCA_POLICE_TBF,
TCA_POLICE_RATE,
TCA_POLICE_PEAKRATE,
TCA_POLICE_AVRATE,
TCA_POLICE_RESULT,
TCA_POLICE_TM,
TCA_POLICE_PAD,
TCA_POLICE_RATE64,
TCA_POLICE_PEAKRATE64,
TCA_POLICE_PKTRATE64,
TCA_POLICE_PKTBURST64,
__TCA_POLICE_MAX

};
# 222 "./include/uapi/linux/pkt_cls.h"
enum {
TCA_U32_UNSPEC,
TCA_U32_CLASSID,
TCA_U32_HASH,
TCA_U32_LINK,
TCA_U32_DIVISOR,
TCA_U32_SEL,
TCA_U32_POLICE,
TCA_U32_ACT,
TCA_U32_INDEV,
TCA_U32_PCNT,
TCA_U32_MARK,
TCA_U32_FLAGS,
TCA_U32_PAD,
__TCA_U32_MAX
};



struct tc_u32_key {
__be32 mask;
__be32 val;
int off;
int offmask;
};

struct tc_u32_sel {
unsigned char flags;
unsigned char offshift;
unsigned char nkeys;

__be16 offmask;
__u16 off;
short offoff;

short hoff;
__be32 hmask;
struct tc_u32_key keys[0];
};

struct tc_u32_mark {
__u32 val;
__u32 mask;
__u32 success;
};

struct tc_u32_pcnt {
__u64 rcnt;
__u64 rhit;
__u64 kcnts[0];
};
# 286 "./include/uapi/linux/pkt_cls.h"
enum {
TCA_RSVP_UNSPEC,
TCA_RSVP_CLASSID,
TCA_RSVP_DST,
TCA_RSVP_SRC,
TCA_RSVP_PINFO,
TCA_RSVP_POLICE,
TCA_RSVP_ACT,
__TCA_RSVP_MAX
};



struct tc_rsvp_gpi {
__u32 key;
__u32 mask;
int offset;
};

struct tc_rsvp_pinfo {
struct tc_rsvp_gpi dpi;
struct tc_rsvp_gpi spi;
__u8 protocol;
__u8 tunnelid;
__u8 tunnelhdr;
__u8 pad;
};



enum {
TCA_ROUTE4_UNSPEC,
TCA_ROUTE4_CLASSID,
TCA_ROUTE4_TO,
TCA_ROUTE4_FROM,
TCA_ROUTE4_IIF,
TCA_ROUTE4_POLICE,
TCA_ROUTE4_ACT,
__TCA_ROUTE4_MAX
};






enum {
TCA_FW_UNSPEC,
TCA_FW_CLASSID,
TCA_FW_POLICE,
TCA_FW_INDEV,
TCA_FW_ACT,
TCA_FW_MASK,
__TCA_FW_MAX
};





enum {
TCA_TCINDEX_UNSPEC,
TCA_TCINDEX_HASH,
TCA_TCINDEX_MASK,
TCA_TCINDEX_SHIFT,
TCA_TCINDEX_FALL_THROUGH,
TCA_TCINDEX_CLASSID,
TCA_TCINDEX_POLICE,
TCA_TCINDEX_ACT,
__TCA_TCINDEX_MAX
};





enum {
FLOW_KEY_SRC,
FLOW_KEY_DST,
FLOW_KEY_PROTO,
FLOW_KEY_PROTO_SRC,
FLOW_KEY_PROTO_DST,
FLOW_KEY_IIF,
FLOW_KEY_PRIORITY,
FLOW_KEY_MARK,
FLOW_KEY_NFCT,
FLOW_KEY_NFCT_SRC,
FLOW_KEY_NFCT_DST,
FLOW_KEY_NFCT_PROTO_SRC,
FLOW_KEY_NFCT_PROTO_DST,
FLOW_KEY_RTCLASSID,
FLOW_KEY_SKUID,
FLOW_KEY_SKGID,
FLOW_KEY_VLAN_TAG,
FLOW_KEY_RXHASH,
__FLOW_KEY_MAX,
};



enum {
FLOW_MODE_MAP,
FLOW_MODE_HASH,
};

enum {
TCA_FLOW_UNSPEC,
TCA_FLOW_KEYS,
TCA_FLOW_MODE,
TCA_FLOW_BASECLASS,
TCA_FLOW_RSHIFT,
TCA_FLOW_ADDEND,
TCA_FLOW_MASK,
TCA_FLOW_XOR,
TCA_FLOW_DIVISOR,
TCA_FLOW_ACT,
TCA_FLOW_POLICE,
TCA_FLOW_EMATCHES,
TCA_FLOW_PERTURB,
__TCA_FLOW_MAX
};





struct tc_basic_pcnt {
__u64 rcnt;
__u64 rhit;
};

enum {
TCA_BASIC_UNSPEC,
TCA_BASIC_CLASSID,
TCA_BASIC_EMATCHES,
TCA_BASIC_ACT,
TCA_BASIC_POLICE,
TCA_BASIC_PCNT,
TCA_BASIC_PAD,
__TCA_BASIC_MAX
};






enum {
TCA_CGROUP_UNSPEC,
TCA_CGROUP_ACT,
TCA_CGROUP_POLICE,
TCA_CGROUP_EMATCHES,
__TCA_CGROUP_MAX,
};







enum {
TCA_BPF_UNSPEC,
TCA_BPF_ACT,
TCA_BPF_POLICE,
TCA_BPF_CLASSID,
TCA_BPF_OPS_LEN,
TCA_BPF_OPS,
TCA_BPF_FD,
TCA_BPF_NAME,
TCA_BPF_FLAGS,
TCA_BPF_FLAGS_GEN,
TCA_BPF_TAG,
TCA_BPF_ID,
__TCA_BPF_MAX,
};





enum {
TCA_FLOWER_UNSPEC,
TCA_FLOWER_CLASSID,
TCA_FLOWER_INDEV,
TCA_FLOWER_ACT,
TCA_FLOWER_KEY_ETH_DST,
TCA_FLOWER_KEY_ETH_DST_MASK,
TCA_FLOWER_KEY_ETH_SRC,
TCA_FLOWER_KEY_ETH_SRC_MASK,
TCA_FLOWER_KEY_ETH_TYPE,
TCA_FLOWER_KEY_IP_PROTO,
TCA_FLOWER_KEY_IPV4_SRC,
TCA_FLOWER_KEY_IPV4_SRC_MASK,
TCA_FLOWER_KEY_IPV4_DST,
TCA_FLOWER_KEY_IPV4_DST_MASK,
TCA_FLOWER_KEY_IPV6_SRC,
TCA_FLOWER_KEY_IPV6_SRC_MASK,
TCA_FLOWER_KEY_IPV6_DST,
TCA_FLOWER_KEY_IPV6_DST_MASK,
TCA_FLOWER_KEY_TCP_SRC,
TCA_FLOWER_KEY_TCP_DST,
TCA_FLOWER_KEY_UDP_SRC,
TCA_FLOWER_KEY_UDP_DST,

TCA_FLOWER_FLAGS,
TCA_FLOWER_KEY_VLAN_ID,
TCA_FLOWER_KEY_VLAN_PRIO,
TCA_FLOWER_KEY_VLAN_ETH_TYPE,

TCA_FLOWER_KEY_ENC_KEY_ID,
TCA_FLOWER_KEY_ENC_IPV4_SRC,
TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
TCA_FLOWER_KEY_ENC_IPV4_DST,
TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
TCA_FLOWER_KEY_ENC_IPV6_SRC,
TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
TCA_FLOWER_KEY_ENC_IPV6_DST,
TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,

TCA_FLOWER_KEY_TCP_SRC_MASK,
TCA_FLOWER_KEY_TCP_DST_MASK,
TCA_FLOWER_KEY_UDP_SRC_MASK,
TCA_FLOWER_KEY_UDP_DST_MASK,
TCA_FLOWER_KEY_SCTP_SRC_MASK,
TCA_FLOWER_KEY_SCTP_DST_MASK,

TCA_FLOWER_KEY_SCTP_SRC,
TCA_FLOWER_KEY_SCTP_DST,

TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,

TCA_FLOWER_KEY_FLAGS,
TCA_FLOWER_KEY_FLAGS_MASK,

TCA_FLOWER_KEY_ICMPV4_CODE,
TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
TCA_FLOWER_KEY_ICMPV4_TYPE,
TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
TCA_FLOWER_KEY_ICMPV6_CODE,
TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
TCA_FLOWER_KEY_ICMPV6_TYPE,
TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,

TCA_FLOWER_KEY_ARP_SIP,
TCA_FLOWER_KEY_ARP_SIP_MASK,
TCA_FLOWER_KEY_ARP_TIP,
TCA_FLOWER_KEY_ARP_TIP_MASK,
TCA_FLOWER_KEY_ARP_OP,
TCA_FLOWER_KEY_ARP_OP_MASK,
TCA_FLOWER_KEY_ARP_SHA,
TCA_FLOWER_KEY_ARP_SHA_MASK,
TCA_FLOWER_KEY_ARP_THA,
TCA_FLOWER_KEY_ARP_THA_MASK,

TCA_FLOWER_KEY_MPLS_TTL,
TCA_FLOWER_KEY_MPLS_BOS,
TCA_FLOWER_KEY_MPLS_TC,
TCA_FLOWER_KEY_MPLS_LABEL,

TCA_FLOWER_KEY_TCP_FLAGS,
TCA_FLOWER_KEY_TCP_FLAGS_MASK,

TCA_FLOWER_KEY_IP_TOS,
TCA_FLOWER_KEY_IP_TOS_MASK,
TCA_FLOWER_KEY_IP_TTL,
TCA_FLOWER_KEY_IP_TTL_MASK,

TCA_FLOWER_KEY_CVLAN_ID,
TCA_FLOWER_KEY_CVLAN_PRIO,
TCA_FLOWER_KEY_CVLAN_ETH_TYPE,

TCA_FLOWER_KEY_ENC_IP_TOS,
TCA_FLOWER_KEY_ENC_IP_TOS_MASK,
TCA_FLOWER_KEY_ENC_IP_TTL,
TCA_FLOWER_KEY_ENC_IP_TTL_MASK,

TCA_FLOWER_KEY_ENC_OPTS,
TCA_FLOWER_KEY_ENC_OPTS_MASK,

TCA_FLOWER_IN_HW_COUNT,

TCA_FLOWER_KEY_PORT_SRC_MIN,
TCA_FLOWER_KEY_PORT_SRC_MAX,
TCA_FLOWER_KEY_PORT_DST_MIN,
TCA_FLOWER_KEY_PORT_DST_MAX,

TCA_FLOWER_KEY_CT_STATE,
TCA_FLOWER_KEY_CT_STATE_MASK,
TCA_FLOWER_KEY_CT_ZONE,
TCA_FLOWER_KEY_CT_ZONE_MASK,
TCA_FLOWER_KEY_CT_MARK,
TCA_FLOWER_KEY_CT_MARK_MASK,
TCA_FLOWER_KEY_CT_LABELS,
TCA_FLOWER_KEY_CT_LABELS_MASK,

TCA_FLOWER_KEY_MPLS_OPTS,

TCA_FLOWER_KEY_HASH,
TCA_FLOWER_KEY_HASH_MASK,

__TCA_FLOWER_MAX,
};



enum {
TCA_FLOWER_KEY_CT_FLAGS_NEW = 1 << 0,
TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED = 1 << 1,
TCA_FLOWER_KEY_CT_FLAGS_RELATED = 1 << 2,
TCA_FLOWER_KEY_CT_FLAGS_TRACKED = 1 << 3,
TCA_FLOWER_KEY_CT_FLAGS_INVALID = 1 << 4,
TCA_FLOWER_KEY_CT_FLAGS_REPLY = 1 << 5,
__TCA_FLOWER_KEY_CT_FLAGS_MAX,
};

enum {
TCA_FLOWER_KEY_ENC_OPTS_UNSPEC,
TCA_FLOWER_KEY_ENC_OPTS_GENEVE,



TCA_FLOWER_KEY_ENC_OPTS_VXLAN,



TCA_FLOWER_KEY_ENC_OPTS_ERSPAN,



TCA_FLOWER_KEY_ENC_OPTS_GTP,



__TCA_FLOWER_KEY_ENC_OPTS_MAX,
};



enum {
TCA_FLOWER_KEY_ENC_OPT_GENEVE_UNSPEC,
TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,

__TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
};




enum {
TCA_FLOWER_KEY_ENC_OPT_VXLAN_UNSPEC,
TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP,
__TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX,
};




enum {
TCA_FLOWER_KEY_ENC_OPT_ERSPAN_UNSPEC,
TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER,
TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX,
TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
__TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX,
};




enum {
TCA_FLOWER_KEY_ENC_OPT_GTP_UNSPEC,
TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,
TCA_FLOWER_KEY_ENC_OPT_GTP_QFI,

__TCA_FLOWER_KEY_ENC_OPT_GTP_MAX,
};




enum {
TCA_FLOWER_KEY_MPLS_OPTS_UNSPEC,
TCA_FLOWER_KEY_MPLS_OPTS_LSE,
__TCA_FLOWER_KEY_MPLS_OPTS_MAX,
};



enum {
TCA_FLOWER_KEY_MPLS_OPT_LSE_UNSPEC,
TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
__TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX,
};




enum {
TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0),
TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
};





struct tc_matchall_pcnt {
__u64 rhit;
};

enum {
TCA_MATCHALL_UNSPEC,
TCA_MATCHALL_CLASSID,
TCA_MATCHALL_ACT,
TCA_MATCHALL_FLAGS,
TCA_MATCHALL_PCNT,
TCA_MATCHALL_PAD,
__TCA_MATCHALL_MAX,
};





struct tcf_ematch_tree_hdr {
__u16 nmatches;
__u16 progid;
};

enum {
TCA_EMATCH_TREE_UNSPEC,
TCA_EMATCH_TREE_HDR,
TCA_EMATCH_TREE_LIST,
__TCA_EMATCH_TREE_MAX
};


struct tcf_ematch_hdr {
__u16 matchid;
__u16 kind;
__u16 flags;
__u16 pad;
};
# 763 "./include/uapi/linux/pkt_cls.h"
enum {
TCF_LAYER_LINK,
TCF_LAYER_NETWORK,
TCF_LAYER_TRANSPORT,
__TCF_LAYER_MAX
};
# 787 "./include/uapi/linux/pkt_cls.h"
enum {
TCF_EM_PROG_TC
};

enum {
TCF_EM_OPND_EQ,
TCF_EM_OPND_GT,
TCF_EM_OPND_LT
};
# 50 "./include/linux/netdevice.h" 2
# 1 "./include/linux/hashtable.h" 1
# 34 "./include/linux/hashtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __hash_init(struct hlist_head *ht, unsigned int sz)
{
unsigned int i;

for (i = 0; i < sz; i++)
((&ht[i])->first = ((void *)0));
}
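/*
 * Expanded from include/linux/hashtable.h: ((void *)0) is NULL, so the
 * loop above just marks every bucket's hlist head as empty.
 */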
# 76 "./include/linux/hashtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool hash_hashed(struct hlist_node *node)
{
return !hlist_unhashed(node);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __hash_empty(struct hlist_head *ht, unsigned int sz)
{
unsigned int i;

for (i = 0; i < sz; i++)
if (!hlist_empty(&ht[i]))
return false;

return true;
}
# 105 "./include/linux/hashtable.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hash_del(struct hlist_node *node)
{
hlist_del_init(node);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hash_del_rcu(struct hlist_node *node)
{
hlist_del_init_rcu(node);
}
# 51 "./include/linux/netdevice.h" 2



struct netpoll_info;
struct device;
struct ethtool_ops;
struct phy_device;
struct dsa_port;
struct ip_tunnel_parm;
struct macsec_context;
struct macsec_ops;

struct sfp_bus;

struct wireless_dev;

struct wpan_dev;
struct mpls_dev;

struct udp_tunnel_info;
struct udp_tunnel_nic_info;
struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;

void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
# 118 "./include/linux/netdevice.h"
enum netdev_tx {
__NETDEV_TX_MIN = (-((int)(~0U >> 1)) - 1),
NETDEV_TX_OK = 0x00,
NETDEV_TX_BUSY = 0x10,
};
typedef enum netdev_tx netdev_tx_t;





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dev_xmit_complete(int rc)
{






if (__builtin_expect(!!(rc < 0x0f), 1))
return true;

return false;
}
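/*
 * 0x0f above is the expanded NET_XMIT_MASK: any return value below it
 * (NETDEV_TX_OK, a negative error, or a NET_XMIT_* congestion code)
 * means the driver consumed the skb; only NETDEV_TX_BUSY (0x10) leaves
 * the skb with the caller.
 */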
# 172 "./include/linux/netdevice.h"
struct net_device_stats {
unsigned long rx_packets;
unsigned long tx_packets;
unsigned long rx_bytes;
unsigned long tx_bytes;
unsigned long rx_errors;
unsigned long tx_errors;
unsigned long rx_dropped;
unsigned long tx_dropped;
unsigned long multicast;
unsigned long collisions;
unsigned long rx_length_errors;
unsigned long rx_over_errors;
unsigned long rx_crc_errors;
unsigned long rx_frame_errors;
unsigned long rx_fifo_errors;
unsigned long rx_missed_errors;
unsigned long tx_aborted_errors;
unsigned long tx_carrier_errors;
unsigned long tx_fifo_errors;
unsigned long tx_heartbeat_errors;
unsigned long tx_window_errors;
unsigned long rx_compressed;
unsigned long tx_compressed;
};




struct net_device_core_stats {
unsigned long rx_dropped;
unsigned long tx_dropped;
unsigned long rx_nohandler;
} __attribute__((__aligned__(4 * sizeof(unsigned long))));






# 1 "./include/linux/static_key.h" 1
# 212 "./include/linux/netdevice.h" 2
extern struct static_key_false rps_needed;
extern struct static_key_false rfs_needed;


struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
struct list_head list;
struct rb_node node;
unsigned char addr[32];
unsigned char type;




bool global_use;
int sync_cnt;
int refcount;
int synced;
struct callback_head callback_head;
};

struct netdev_hw_addr_list {
struct list_head list;
int count;


struct rb_root tree;
};
# 259 "./include/linux/netdevice.h"
struct hh_cache {
unsigned int hh_len;
seqlock_t hh_lock;







unsigned long hh_data[(((32)+(16 -1))&~(16 - 1)) / sizeof(long)];
};
# 285 "./include/linux/netdevice.h"
struct header_ops {
int (*create) (struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
const void *saddr, unsigned int len);
int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
void (*cache_update)(struct hh_cache *hh,
const struct net_device *dev,
const unsigned char *haddr);
bool (*validate)(const char *ll_header, unsigned int len);
__be16 (*parse_protocol)(const struct sk_buff *skb);
};






enum netdev_state_t {
__LINK_STATE_START,
__LINK_STATE_PRESENT,
__LINK_STATE_NOCARRIER,
__LINK_STATE_LINKWATCH_PENDING,
__LINK_STATE_DORMANT,
__LINK_STATE_TESTING,
};

struct gro_list {
struct list_head list;
int count;
};
# 326 "./include/linux/netdevice.h"
struct napi_struct {






struct list_head poll_list;

unsigned long state;
int weight;
int defer_hard_irqs_count;
unsigned long gro_bitmask;
int (*poll)(struct napi_struct *, int);



struct net_device *dev;
struct gro_list gro_hash[8];
struct sk_buff *skb;
struct list_head rx_list;
int rx_count;
struct hrtimer timer;
struct list_head dev_list;
struct hlist_node napi_hash_node;
unsigned int napi_id;
struct task_struct *thread;
};

enum {
NAPI_STATE_SCHED,
NAPI_STATE_MISSED,
NAPI_STATE_DISABLE,
NAPI_STATE_NPSVC,
NAPI_STATE_LISTED,
NAPI_STATE_NO_BUSY_POLL,
NAPI_STATE_IN_BUSY_POLL,
NAPI_STATE_PREFER_BUSY_POLL,
NAPI_STATE_THREADED,
NAPI_STATE_SCHED_THREADED,
};

enum {
NAPIF_STATE_SCHED = ((((1UL))) << (NAPI_STATE_SCHED)),
NAPIF_STATE_MISSED = ((((1UL))) << (NAPI_STATE_MISSED)),
NAPIF_STATE_DISABLE = ((((1UL))) << (NAPI_STATE_DISABLE)),
NAPIF_STATE_NPSVC = ((((1UL))) << (NAPI_STATE_NPSVC)),
NAPIF_STATE_LISTED = ((((1UL))) << (NAPI_STATE_LISTED)),
NAPIF_STATE_NO_BUSY_POLL = ((((1UL))) << (NAPI_STATE_NO_BUSY_POLL)),
NAPIF_STATE_IN_BUSY_POLL = ((((1UL))) << (NAPI_STATE_IN_BUSY_POLL)),
NAPIF_STATE_PREFER_BUSY_POLL = ((((1UL))) << (NAPI_STATE_PREFER_BUSY_POLL)),
NAPIF_STATE_THREADED = ((((1UL))) << (NAPI_STATE_THREADED)),
NAPIF_STATE_SCHED_THREADED = ((((1UL))) << (NAPI_STATE_SCHED_THREADED)),
};
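/*
 * The ((((1UL))) << (...)) noise above is the expanded BIT() macro:
 * each NAPIF_STATE_* flag is the mask for the matching NAPI_STATE_*
 * bit number in napi_struct::state.
 */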

enum gro_result {
GRO_MERGED,
GRO_MERGED_FREE,
GRO_HELD,
GRO_NORMAL,
GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;
# 431 "./include/linux/netdevice.h"
enum rx_handler_result {
RX_HANDLER_CONSUMED,
RX_HANDLER_ANOTHER,
RX_HANDLER_EXACT,
RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);

void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool napi_disable_pending(struct napi_struct *n)
{
return arch_test_bit(NAPI_STATE_DISABLE, &n->state);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool napi_prefer_busy_poll(struct napi_struct *n)
{
return arch_test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
}

bool napi_schedule_prep(struct napi_struct *n);
# 462 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void napi_schedule(struct napi_struct *n)
{
if (napi_schedule_prep(n))
__napi_schedule(n);
}
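/*
 * napi_schedule_prep() atomically tests and sets NAPI_STATE_SCHED, so
 * of several concurrent callers only one actually queues the NAPI
 * instance for polling; the helpers below reuse the same pattern.
 */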







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void napi_schedule_irqoff(struct napi_struct *n)
{
if (napi_schedule_prep(n))
__napi_schedule_irqoff(n);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool napi_reschedule(struct napi_struct *napi)
{
if (napi_schedule_prep(napi)) {
__napi_schedule(napi);
return true;
}
return false;
}

bool napi_complete_done(struct napi_struct *n, int work_done);
# 499 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool napi_complete(struct napi_struct *n)
{
return napi_complete_done(n, 0);
}

int dev_set_threaded(struct net_device *dev, bool threaded);
# 513 "./include/linux/netdevice.h"
void napi_disable(struct napi_struct *n);

void napi_enable(struct napi_struct *n);
# 525 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void napi_synchronize(const struct napi_struct *n)
{
if (1)
while (arch_test_bit(NAPI_STATE_SCHED, &n->state))
msleep(1);
else
__asm__ __volatile__("": : :"memory");
}
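/*
 * The "if (1)" above is the expanded IS_ENABLED(CONFIG_SMP) check: on
 * this SMP configuration napi_synchronize() sleeps 1 ms per iteration
 * until NAPI_STATE_SCHED clears, and the barrier() branch is dead code.
 */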
# 542 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
unsigned long val, new;

do {
val = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_289(void) ; if (!((sizeof(n->state) == sizeof(char) || sizeof(n->state) == sizeof(short) || sizeof(n->state) == sizeof(int) || sizeof(n->state) == sizeof(long)) || sizeof(n->state) == sizeof(long long))) __compiletime_assert_289(); } while (0); (*(const volatile typeof( _Generic((n->state), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (n->state))) *)&(n->state)); });
if (val & NAPIF_STATE_DISABLE)
return true;

if (!(val & NAPIF_STATE_SCHED))
return false;

new = val | NAPIF_STATE_MISSED;
} while (({ typeof(&n->state) __ai_ptr = (&n->state); do { } while (0); instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); ({ __typeof__(*(__ai_ptr)) _o_ = (val); __typeof__(*(__ai_ptr)) _n_ = (new); (__typeof__(*(__ai_ptr))) ({ __typeof__((__ai_ptr)) __ptr = ((__ai_ptr)); __typeof__(*((__ai_ptr))) __old = (_o_); __typeof__(*((__ai_ptr))) __new = (_n_); __typeof__(*((__ai_ptr))) __ret; register unsigned int __rc; switch (sizeof(*(__ai_ptr))) { case 4: __asm__ __volatile__ ( "0: lr.w %0, %2\n" " bne %0, %z3, 1f\n" " sc.w.rl %1, %z4, %2\n" " bnez %1, 0b\n" " fence rw, rw\n" "1:\n" : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) : "rJ" ((long)__old), "rJ" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( "0: lr.d %0, %2\n" " bne %0, %z3, 1f\n" " sc.d.rl %1, %z4, %2\n" " bnez %1, 0b\n" " fence rw, rw\n" "1:\n" : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) : "rJ" (__old), "rJ" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_290(void) ; if (!(!(1))) __compiletime_assert_290(); } while (0); } __ret; }); }); }) != val);

return true;
}
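/*
 * The two unreadable expressions above are macro expansions: the first
 * is READ_ONCE(n->state) (a _Generic-based volatile load guarded by a
 * compile-time size assertion), the second is cmpxchg(&n->state, val,
 * new), which on RISC-V expands to an LR/SC retry loop. The function
 * sets NAPIF_STATE_MISSED only while the NAPI is still scheduled.
 */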

enum netdev_queue_state_t {
__QUEUE_STATE_DRV_XOFF,
__QUEUE_STATE_STACK_XOFF,
__QUEUE_STATE_FROZEN,
};
# 586 "./include/linux/netdevice.h"
struct netdev_queue {



struct net_device *dev;
netdevice_tracker dev_tracker;

struct Qdisc *qdisc;
struct Qdisc *qdisc_sleeping;

struct kobject kobj;




unsigned long tx_maxrate;




atomic_long_t trans_timeout;


struct net_device *sb_dev;






spinlock_t _xmit_lock __attribute__((__aligned__((1 << 6))));
int xmit_lock_owner;



unsigned long trans_start;

unsigned long state;


struct dql dql;

} __attribute__((__aligned__((1 << 6))));

extern int sysctl_fb_tunnels_only_for_init_net;
extern int sysctl_devconf_inherit_init_net;






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool net_has_fallback_tunnels(const struct net *net)
{
return !1 ||
!sysctl_fb_tunnels_only_for_init_net ||
(net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int netdev_queue_numa_node_read(const struct netdev_queue *q)
{



return (-1);

}
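/*
 * (-1) above is the expanded NUMA_NO_NODE; the accessors are stubs
 * because this configuration apparently lacks the XPS/NUMA support
 * that would record a node per TX queue.
 */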

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{



}






struct rps_map {
unsigned int len;
struct callback_head rcu;
u16 cpus[];
};







struct rps_dev_flow {
u16 cpu;
u16 filter;
unsigned int last_qtail;
};





struct rps_dev_flow_table {
unsigned int mask;
struct callback_head rcu;
struct rps_dev_flow flows[];
};
# 706 "./include/linux/netdevice.h"
struct rps_sock_flow_table {
u32 mask;

u32 ents[] __attribute__((__aligned__((1 << 6))));
};




extern u32 rps_cpu_mask;
extern struct rps_sock_flow_table *rps_sock_flow_table;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rps_record_sock_flow(struct rps_sock_flow_table *table,
u32 hash)
{
if (table && hash) {
unsigned int index = hash & table->mask;
u32 val = hash & ~rps_cpu_mask;


val |= (((struct thread_info *)get_current())->cpu);

if (table->ents[index] != val)
table->ents[index] = val;
}
}
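/*
 * RFS bookkeeping: each table entry stores the upper bits of the flow
 * hash plus the CPU the socket last ran on (the expanded
 * raw_smp_processor_id(), read from thread_info here), so received
 * packets for the flow can be steered back to that CPU.
 */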


bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
u16 filter_id);




struct netdev_rx_queue {
struct xdp_rxq_info xdp_rxq;

struct rps_map *rps_map;
struct rps_dev_flow_table *rps_flow_table;

struct kobject kobj;
struct net_device *dev;
netdevice_tracker dev_tracker;




} __attribute__((__aligned__((1 << 6))));




struct rx_queue_attribute {
struct attribute attr;
ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
ssize_t (*store)(struct netdev_rx_queue *queue,
const char *buf, size_t len);
};


enum xps_map_type {
XPS_CPUS = 0,
XPS_RXQS,
XPS_MAPS_MAX,
};






struct xps_map {
unsigned int len;
unsigned int alloc_len;
struct callback_head rcu;
u16 queues[];
};
# 798 "./include/linux/netdevice.h"
struct xps_dev_maps {
struct callback_head rcu;
unsigned int nr_ids;
s16 num_tc;
struct xps_map *attr_map[];
};
# 816 "./include/linux/netdevice.h"
struct netdev_tc_txq {
u16 count;
u16 offset;
};
# 843 "./include/linux/netdevice.h"
struct netdev_phys_item_id {
unsigned char id[32];
unsigned char id_len;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
struct netdev_phys_item_id *b)
{
return a->id_len == b->id_len &&
memcmp(a->id, b->id, a->id_len) == 0;
}

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev);

enum net_device_path_type {
DEV_PATH_ETHERNET = 0,
DEV_PATH_VLAN,
DEV_PATH_BRIDGE,
DEV_PATH_PPPOE,
DEV_PATH_DSA,
};

struct net_device_path {
enum net_device_path_type type;
const struct net_device *dev;
union {
struct {
u16 id;
__be16 proto;
u8 h_dest[6];
} encap;
struct {
enum {
DEV_PATH_BR_VLAN_KEEP,
DEV_PATH_BR_VLAN_TAG,
DEV_PATH_BR_VLAN_UNTAG,
DEV_PATH_BR_VLAN_UNTAG_HW,
} vlan_mode;
u16 vlan_id;
__be16 vlan_proto;
} bridge;
struct {
int port;
u16 proto;
} dsa;
};
};




struct net_device_path_stack {
int num_paths;
struct net_device_path path[5];
};

struct net_device_path_ctx {
const struct net_device *dev;
const u8 *daddr;

int num_vlans;
struct {
u16 id;
__be16 proto;
} vlan[2];
};

enum tc_setup_type {
TC_SETUP_QDISC_MQPRIO,
TC_SETUP_CLSU32,
TC_SETUP_CLSFLOWER,
TC_SETUP_CLSMATCHALL,
TC_SETUP_CLSBPF,
TC_SETUP_BLOCK,
TC_SETUP_QDISC_CBS,
TC_SETUP_QDISC_RED,
TC_SETUP_QDISC_PRIO,
TC_SETUP_QDISC_MQ,
TC_SETUP_QDISC_ETF,
TC_SETUP_ROOT_QDISC,
TC_SETUP_QDISC_GRED,
TC_SETUP_QDISC_TAPRIO,
TC_SETUP_FT,
TC_SETUP_QDISC_ETS,
TC_SETUP_QDISC_TBF,
TC_SETUP_QDISC_FIFO,
TC_SETUP_QDISC_HTB,
TC_SETUP_ACT,
};




enum bpf_netdev_command {







XDP_SETUP_PROG,
XDP_SETUP_PROG_HW,

BPF_OFFLOAD_MAP_ALLOC,
BPF_OFFLOAD_MAP_FREE,
XDP_SETUP_XSK_POOL,
};

struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
struct xdp_dev_bulk_queue;
struct bpf_xdp_link;

enum bpf_xdp_mode {
XDP_MODE_SKB = 0,
XDP_MODE_DRV = 1,
XDP_MODE_HW = 2,
__MAX_XDP_MODE
};

struct bpf_xdp_entity {
struct bpf_prog *prog;
struct bpf_xdp_link *link;
};

struct netdev_bpf {
enum bpf_netdev_command command;
union {

struct {
u32 flags;
struct bpf_prog *prog;
struct netlink_ext_ack *extack;
};

struct {
struct bpf_offloaded_map *offmap;
};

struct {
struct xsk_buff_pool *pool;
u16 queue_id;
} xsk;
};
};
# 1008 "./include/linux/netdevice.h"
struct dev_ifalias {
struct callback_head rcuhead;
char ifalias[];
};

struct devlink;
struct tlsdev_ops;

struct netdev_name_node {
struct hlist_node hlist;
struct list_head list;
struct net_device *dev;
const char *name;
};

int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);

struct netdev_net_notifier {
struct list_head list;
struct notifier_block *nb;
};
# 1357 "./include/linux/netdevice.h"
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
void (*ndo_uninit)(struct net_device *dev);
int (*ndo_open)(struct net_device *dev);
int (*ndo_stop)(struct net_device *dev);
netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
struct net_device *dev);
netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
u16 (*ndo_select_queue)(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev);
void (*ndo_change_rx_flags)(struct net_device *dev,
int flags);
void (*ndo_set_rx_mode)(struct net_device *dev);
int (*ndo_set_mac_address)(struct net_device *dev,
void *addr);
int (*ndo_validate_addr)(struct net_device *dev);
int (*ndo_do_ioctl)(struct net_device *dev,
struct ifreq *ifr, int cmd);
int (*ndo_eth_ioctl)(struct net_device *dev,
struct ifreq *ifr, int cmd);
int (*ndo_siocbond)(struct net_device *dev,
struct ifreq *ifr, int cmd);
int (*ndo_siocwandev)(struct net_device *dev,
struct if_settings *ifs);
int (*ndo_siocdevprivate)(struct net_device *dev,
struct ifreq *ifr,
void *data, int cmd);
int (*ndo_set_config)(struct net_device *dev,
struct ifmap *map);
int (*ndo_change_mtu)(struct net_device *dev,
int new_mtu);
int (*ndo_neigh_setup)(struct net_device *dev,
struct neigh_parms *);
void (*ndo_tx_timeout) (struct net_device *dev,
unsigned int txqueue);

void (*ndo_get_stats64)(struct net_device *dev,
struct rtnl_link_stats64 *storage);
bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
int (*ndo_get_offload_stats)(int attr_id,
const struct net_device *dev,
void *attr_data);
struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
__be16 proto, u16 vid);
int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
__be16 proto, u16 vid);






int (*ndo_set_vf_mac)(struct net_device *dev,
int queue, u8 *mac);
int (*ndo_set_vf_vlan)(struct net_device *dev,
int queue, u16 vlan,
u8 qos, __be16 proto);
int (*ndo_set_vf_rate)(struct net_device *dev,
int vf, int min_tx_rate,
int max_tx_rate);
int (*ndo_set_vf_spoofchk)(struct net_device *dev,
int vf, bool setting);
int (*ndo_set_vf_trust)(struct net_device *dev,
int vf, bool setting);
int (*ndo_get_vf_config)(struct net_device *dev,
int vf,
struct ifla_vf_info *ivf);
int (*ndo_set_vf_link_state)(struct net_device *dev,
int vf, int link_state);
int (*ndo_get_vf_stats)(struct net_device *dev,
int vf,
struct ifla_vf_stats
*vf_stats);
int (*ndo_set_vf_port)(struct net_device *dev,
int vf,
struct nlattr *port[]);
int (*ndo_get_vf_port)(struct net_device *dev,
int vf, struct sk_buff *skb);
int (*ndo_get_vf_guid)(struct net_device *dev,
int vf,
struct ifla_vf_guid *node_guid,
struct ifla_vf_guid *port_guid);
int (*ndo_set_vf_guid)(struct net_device *dev,
int vf, u64 guid,
int guid_type);
int (*ndo_set_vf_rss_query_en)(
struct net_device *dev,
int vf, bool setting);
int (*ndo_setup_tc)(struct net_device *dev,
enum tc_setup_type type,
void *type_data);
# 1478 "./include/linux/netdevice.h"
int (*ndo_rx_flow_steer)(struct net_device *dev,
const struct sk_buff *skb,
u16 rxq_index,
u32 flow_id);

int (*ndo_add_slave)(struct net_device *dev,
struct net_device *slave_dev,
struct netlink_ext_ack *extack);
int (*ndo_del_slave)(struct net_device *dev,
struct net_device *slave_dev);
struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev,
struct sk_buff *skb,
bool all_slaves);
struct net_device* (*ndo_sk_get_lower_dev)(struct net_device *dev,
struct sock *sk);
netdev_features_t (*ndo_fix_features)(struct net_device *dev,
netdev_features_t features);
int (*ndo_set_features)(struct net_device *dev,
netdev_features_t features);
int (*ndo_neigh_construct)(struct net_device *dev,
struct neighbour *n);
void (*ndo_neigh_destroy)(struct net_device *dev,
struct neighbour *n);

int (*ndo_fdb_add)(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
u16 vid,
u16 flags,
struct netlink_ext_ack *extack);
int (*ndo_fdb_del)(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
u16 vid);
int (*ndo_fdb_dump)(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev,
int *idx);
int (*ndo_fdb_get)(struct sk_buff *skb,
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
u16 vid, u32 portid, u32 seq,
struct netlink_ext_ack *extack);
int (*ndo_bridge_setlink)(struct net_device *dev,
struct nlmsghdr *nlh,
u16 flags,
struct netlink_ext_ack *extack);
int (*ndo_bridge_getlink)(struct sk_buff *skb,
u32 pid, u32 seq,
struct net_device *dev,
u32 filter_mask,
int nlflags);
int (*ndo_bridge_dellink)(struct net_device *dev,
struct nlmsghdr *nlh,
u16 flags);
int (*ndo_change_carrier)(struct net_device *dev,
bool new_carrier);
int (*ndo_get_phys_port_id)(struct net_device *dev,
struct netdev_phys_item_id *ppid);
int (*ndo_get_port_parent_id)(struct net_device *dev,
struct netdev_phys_item_id *ppid);
int (*ndo_get_phys_port_name)(struct net_device *dev,
char *name, size_t len);
void* (*ndo_dfwd_add_station)(struct net_device *pdev,
struct net_device *dev);
void (*ndo_dfwd_del_station)(struct net_device *pdev,
void *priv);

int (*ndo_set_tx_maxrate)(struct net_device *dev,
int queue_index,
u32 maxrate);
int (*ndo_get_iflink)(const struct net_device *dev);
int (*ndo_fill_metadata_dst)(struct net_device *dev,
struct sk_buff *skb);
void (*ndo_set_rx_headroom)(struct net_device *dev,
int needed_headroom);
int (*ndo_bpf)(struct net_device *dev,
struct netdev_bpf *bpf);
int (*ndo_xdp_xmit)(struct net_device *dev, int n,
struct xdp_frame **xdp,
u32 flags);
struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *dev,
struct xdp_buff *xdp);
int (*ndo_xsk_wakeup)(struct net_device *dev,
u32 queue_id, u32 flags);
struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev);
int (*ndo_tunnel_ctl)(struct net_device *dev,
struct ip_tunnel_parm *p, int cmd);
struct net_device * (*ndo_get_peer_dev)(struct net_device *dev);
int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
struct net_device_path *path);
};
# 1624 "./include/linux/netdevice.h"
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
IFF_EBRIDGE = 1<<1,
IFF_BONDING = 1<<2,
IFF_ISATAP = 1<<3,
IFF_WAN_HDLC = 1<<4,
IFF_XMIT_DST_RELEASE = 1<<5,
IFF_DONT_BRIDGE = 1<<6,
IFF_DISABLE_NETPOLL = 1<<7,
IFF_MACVLAN_PORT = 1<<8,
IFF_BRIDGE_PORT = 1<<9,
IFF_OVS_DATAPATH = 1<<10,
IFF_TX_SKB_SHARING = 1<<11,
IFF_UNICAST_FLT = 1<<12,
IFF_TEAM_PORT = 1<<13,
IFF_SUPP_NOFCS = 1<<14,
IFF_LIVE_ADDR_CHANGE = 1<<15,
IFF_MACVLAN = 1<<16,
IFF_XMIT_DST_RELEASE_PERM = 1<<17,
IFF_L3MDEV_MASTER = 1<<18,
IFF_NO_QUEUE = 1<<19,
IFF_OPENVSWITCH = 1<<20,
IFF_L3MDEV_SLAVE = 1<<21,
IFF_TEAM = 1<<22,
IFF_RXFH_CONFIGURED = 1<<23,
IFF_PHONY_HEADROOM = 1<<24,
IFF_MACSEC = 1<<25,
IFF_NO_RX_HANDLER = 1<<26,
IFF_FAILOVER = 1<<27,
IFF_FAILOVER_SLAVE = 1<<28,
IFF_L3MDEV_RX_HANDLER = 1<<29,
IFF_LIVE_RENAME_OK = 1<<30,
IFF_TX_SKB_NO_LINEAR = 1<<31,
IFF_CHANGE_PROTO_DOWN = ((((1ULL))) << (32)),
};
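/*
 * The values above are expanded BIT()/BIT_ULL(); IFF_CHANGE_PROTO_DOWN
 * needs the 1ULL form because it is bit 32 and net_device::priv_flags
 * widened to unsigned long long.
 */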
# 1694 "./include/linux/netdevice.h"
enum netdev_ml_priv_type {
ML_PRIV_NONE,
ML_PRIV_CAN,
};
# 1966 "./include/linux/netdevice.h"
struct net_device {
char name[16];
struct netdev_name_node *name_node;
struct dev_ifalias *ifalias;




unsigned long mem_end;
unsigned long mem_start;
unsigned long base_addr;







unsigned long state;

struct list_head dev_list;
struct list_head napi_list;
struct list_head unreg_list;
struct list_head close_list;
struct list_head ptype_all;
struct list_head ptype_specific;

struct {
struct list_head upper;
struct list_head lower;
} adj_list;


unsigned int flags;
unsigned long long priv_flags;
const struct net_device_ops *netdev_ops;
int ifindex;
unsigned short gflags;
unsigned short hard_header_len;






unsigned int mtu;
unsigned short needed_headroom;
unsigned short needed_tailroom;

netdev_features_t features;
netdev_features_t hw_features;
netdev_features_t wanted_features;
netdev_features_t vlan_features;
netdev_features_t hw_enc_features;
netdev_features_t mpls_features;
netdev_features_t gso_partial_features;

unsigned int min_mtu;
unsigned int max_mtu;
unsigned short type;
unsigned char min_header_len;
unsigned char name_assign_type;

int group;

struct net_device_stats stats;

struct net_device_core_stats *core_stats;


atomic_t carrier_up_count;
atomic_t carrier_down_count;





const struct ethtool_ops *ethtool_ops;




const struct ndisc_ops *ndisc_ops;
# 2059 "./include/linux/netdevice.h"
const struct header_ops *header_ops;

unsigned char operstate;
unsigned char link_mode;

unsigned char if_port;
unsigned char dma;


unsigned char perm_addr[32];
unsigned char addr_assign_type;
unsigned char addr_len;
unsigned char upper_level;
unsigned char lower_level;

unsigned short neigh_priv_len;
unsigned short dev_id;
unsigned short dev_port;
unsigned short padded;

spinlock_t addr_list_lock;
int irq;

struct netdev_hw_addr_list uc;
struct netdev_hw_addr_list mc;
struct netdev_hw_addr_list dev_addrs;


struct kset *queues_kset;


struct list_head unlink_list;

unsigned int promiscuity;
unsigned int allmulti;
bool uc_promisc;

unsigned char nested_level;
# 2114 "./include/linux/netdevice.h"
struct in_device *ip_ptr;



struct inet6_dev *ip6_ptr;



struct wireless_dev *ieee80211_ptr;
struct wpan_dev *ieee802154_ptr;
# 2135 "./include/linux/netdevice.h"
const unsigned char *dev_addr;

struct netdev_rx_queue *_rx;
unsigned int num_rx_queues;
unsigned int real_num_rx_queues;

struct bpf_prog *xdp_prog;
unsigned long gro_flush_timeout;
int napi_defer_hard_irqs;

unsigned int gro_max_size;
rx_handler_func_t *rx_handler;
void *rx_handler_data;




struct netdev_queue *ingress_queue;




unsigned char broadcast[32];

struct cpu_rmap *rx_cpu_rmap;

struct hlist_node index_hlist;




struct netdev_queue *_tx __attribute__((__aligned__((1 << 6))));
unsigned int num_tx_queues;
unsigned int real_num_tx_queues;
struct Qdisc *qdisc;
unsigned int tx_queue_len;
spinlock_t tx_global_lock;

struct xdp_dev_bulk_queue *xdp_bulkq;


struct xps_dev_maps *xps_maps[XPS_MAPS_MAX];
# 2189 "./include/linux/netdevice.h"
struct timer_list watchdog_timer;
int watchdog_timeo;

u32 proto_down_reason;

struct list_head todo_list;


int *pcpu_refcnt;



struct ref_tracker_dir refcnt_tracker;

struct list_head link_watch_list;

enum { NETREG_UNINITIALIZED=0,
NETREG_REGISTERED,
NETREG_UNREGISTERING,
NETREG_UNREGISTERED,
NETREG_RELEASED,
NETREG_DUMMY,
} reg_state:8;

bool dismantle;

enum {
RTNL_LINK_INITIALIZED,
RTNL_LINK_INITIALIZING,
} rtnl_link_state:16;

bool needs_free_netdev;
void (*priv_destructor)(struct net_device *dev);





possible_net_t nd_net;


void *ml_priv;
enum netdev_ml_priv_type ml_priv_type;

union {
struct pcpu_lstats *lstats;
struct pcpu_sw_netstats *tstats;
struct pcpu_dstats *dstats;
};
# 2248 "./include/linux/netdevice.h"
struct device dev;
const struct attribute_group *sysfs_groups[4];
const struct attribute_group *sysfs_rx_queue_group;

const struct rtnl_link_ops *rtnl_link_ops;



unsigned int gso_max_size;

u16 gso_max_segs;




s16 num_tc;
struct netdev_tc_txq tc_to_txq[16];
u8 prio_tc_map[15 + 1];







struct phy_device *phydev;
struct sfp_bus *sfp_bus;
struct lock_class_key *qdisc_tx_busylock;
bool proto_down;
unsigned wol_enabled:1;
unsigned threaded:1;

struct list_head net_notifier_list;





const struct udp_tunnel_nic_info *udp_tunnel_nic_info;
struct udp_tunnel_nic *udp_tunnel_nic;


struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];

u8 dev_addr_shadow[32];
netdevice_tracker linkwatch_dev_tracker;
netdevice_tracker watchdog_dev_tracker;
netdevice_tracker dev_registered_tracker;
struct rtnl_hw_stats64 *offload_xstats_l3;
};
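/*
 * struct net_device is the central per-interface object; the
 * __aligned__((1 << 6)) annotations (64 bytes, the cache line size on
 * this configuration) keep the hot RX and TX sections on separate
 * cache lines.
 */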


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_elide_gro(const struct net_device *dev)
{
if (!(dev->features & ((netdev_features_t)1 << (NETIF_F_GRO_BIT))) || dev->xdp_prog)
return true;
return false;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
{
return dev->prio_tc_map[prio & 15];
}

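/* -22 below is the expanded -EINVAL; prio is masked into the 16-entry
 * prio_tc_map, and the traffic class itself is clamped to 4 bits. */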
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
{
if (tc >= dev->num_tc)
return -22;

dev->prio_tc_map[prio & 15] = tc & 15;
return 0;
}

int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
void netdev_reset_tc(struct net_device *dev);
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
int netdev_set_num_tc(struct net_device *dev, u8 num_tc);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
int netdev_get_num_tc(struct net_device *dev)
{
return dev->num_tc;
}

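/* Both helpers below prefetch two consecutive cache lines; (1 << 6) is
 * the 64-byte L1_CACHE_BYTES of this configuration. */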
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void net_prefetch(void *p)
{
__builtin_prefetch(p);

__builtin_prefetch((u8 *)p + (1 << 6));

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void net_prefetchw(void *p)
{
__builtin_prefetch(p,1);

__builtin_prefetch((u8 *)p + (1 << 6),1);

}

void netdev_unbind_sb_channel(struct net_device *dev,
struct net_device *sb_dev);
int netdev_bind_sb_channel_queue(struct net_device *dev,
struct net_device *sb_dev,
u8 tc, u16 count, u16 offset);
int netdev_set_sb_channel(struct net_device *dev, u16 channel);
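/* The ({ ... }) body below is the expanded max(-dev->num_tc, 0):
 * netdev_set_sb_channel() stores a subordinate channel as a negative
 * num_tc, so a regular device simply reports channel 0. */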
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int netdev_get_sb_channel(struct net_device *dev)
{
return __builtin_choose_expr(((!!(sizeof((typeof((int)(-dev->num_tc)) *)1 == (typeof((int)(0)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(-dev->num_tc)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(0)) * 0l)) : (int *)8))))), (((int)(-dev->num_tc)) > ((int)(0)) ? ((int)(-dev->num_tc)) : ((int)(0))), ({ typeof((int)(-dev->num_tc)) __UNIQUE_ID___x291 = ((int)(-dev->num_tc)); typeof((int)(0)) __UNIQUE_ID___y292 = ((int)(0)); ((__UNIQUE_ID___x291) > (__UNIQUE_ID___y292) ? (__UNIQUE_ID___x291) : (__UNIQUE_ID___y292)); }));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
unsigned int index)
{
return &dev->_tx[index];
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
const struct sk_buff *skb)
{
return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netdev_for_each_tx_queue(struct net_device *dev,
void (*f)(struct net_device *,
struct netdev_queue *,
void *),
void *arg)
{
unsigned int i;

for (i = 0; i < dev->num_tx_queues; i++)
f(dev, &dev->_tx[i], arg);
}
# 2403 "./include/linux/netdevice.h"
u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned netdev_get_fwd_headroom(struct net_device *dev)
{
return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
{
if (dev->netdev_ops->ndo_set_rx_headroom)
dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netdev_reset_rx_headroom(struct net_device *dev)
{
netdev_set_rx_headroom(dev, -1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *netdev_get_ml_priv(struct net_device *dev,
enum netdev_ml_priv_type type)
{
if (dev->ml_priv_type != type)
return ((void *)0);

return dev->ml_priv;
}

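/* The two ({ ... }) statements below are expanded WARN() invocations:
 * the "ebreak" plus a __bug_table entry is the RISC-V BUG/WARN trap
 * machinery, taken when an ml_priv type mismatch is detected. */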
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netdev_set_ml_priv(struct net_device *dev,
void *ml_priv,
enum netdev_ml_priv_type type)
{
({ int __ret_warn_on = !!(dev->ml_priv_type && dev->ml_priv_type != type); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n", dev->ml_priv_type, type); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/netdevice.h"), "i" (2444), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });


({ int __ret_warn_on = !!(!dev->ml_priv_type && dev->ml_priv); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n"); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/netdevice.h"), "i" (2446), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });


dev->ml_priv = ml_priv;
dev->ml_priv_type = type;
}




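/* possible_net_t accessors: with CONFIG_NET_NS enabled these follow the
 * stored struct net pointer; otherwise they collapse to &init_net. */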
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct net *dev_net(const struct net_device *dev)
{
return read_pnet(&dev->nd_net);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
void dev_net_set(struct net_device *dev, struct net *net)
{
write_pnet(&dev->nd_net, net);
}







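/* Driver-private area: it starts right after struct net_device, rounded
 * up to a 32-byte boundary (NETDEV_ALIGN in the unexpanded source). */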
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *netdev_priv(const struct net_device *dev)
{
return (char *)dev + ((((sizeof(struct net_device))) + ((typeof((sizeof(struct net_device))))((32)) - 1)) & ~((typeof((sizeof(struct net_device))))((32)) - 1));
}
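/* Worked example (hypothetical size): if sizeof(struct net_device) were
 * 2500, the ALIGN(2500, 32) computation above would yield 2528, so the
 * private area would begin at dev + 2528. */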
# 2504 "./include/linux/netdevice.h"
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight);
# 2518 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_tx_napi_add(struct net_device *dev,
struct napi_struct *napi,
int (*poll)(struct napi_struct *, int),
int weight)
{
set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
netif_napi_add(dev, napi, poll, weight);
}
# 2535 "./include/linux/netdevice.h"
void __netif_napi_del(struct napi_struct *napi);







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_napi_del(struct napi_struct *napi)
{
__netif_napi_del(napi);
synchronize_net();
}

struct packet_type {
__be16 type;
bool ignore_outgoing;
struct net_device *dev;
netdevice_tracker dev_tracker;
int (*func) (struct sk_buff *,
struct net_device *,
struct packet_type *,
struct net_device *);
void (*list_func) (struct list_head *,
struct packet_type *,
struct net_device *);
bool (*id_match)(struct packet_type *ptype,
struct sock *sk);
struct net *af_packet_net;
void *af_packet_priv;
struct list_head list;
};

struct offload_callbacks {
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
netdev_features_t features);
struct sk_buff *(*gro_receive)(struct list_head *head,
struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb, int nhoff);
};

struct packet_offload {
__be16 type;
u16 priority;
struct offload_callbacks callbacks;
struct list_head list;
};


struct pcpu_sw_netstats {
u64 rx_packets;
u64 rx_bytes;
u64 tx_packets;
u64 tx_bytes;
struct u64_stats_sync syncp;
} __attribute__((__aligned__(4 * sizeof(u64))));

struct pcpu_lstats {
u64_stats_t packets;
u64_stats_t bytes;
struct u64_stats_sync syncp;
} __attribute__((__aligned__(2 * sizeof(u64))));

void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);

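/* In the three stats helpers below, each ({ ... }) pointer dance is the
 * expansion of this_cpu_ptr(): it offsets the per-cpu pointer by this
 * CPU's entry in __per_cpu_offset[]. */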
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
{
struct pcpu_sw_netstats *tstats = ({ do { const void *__vpp_verify = (typeof((dev->tstats) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(dev->tstats)) *)(dev->tstats)); (typeof((typeof(*(dev->tstats)) *)(dev->tstats))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); });

u64_stats_update_begin(&tstats->syncp);
tstats->rx_bytes += len;
tstats->rx_packets++;
u64_stats_update_end(&tstats->syncp);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_sw_netstats_tx_add(struct net_device *dev,
unsigned int packets,
unsigned int len)
{
struct pcpu_sw_netstats *tstats = ({ do { const void *__vpp_verify = (typeof((dev->tstats) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(dev->tstats)) *)(dev->tstats)); (typeof((typeof(*(dev->tstats)) *)(dev->tstats))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); });

u64_stats_update_begin(&tstats->syncp);
tstats->tx_bytes += len;
tstats->tx_packets += packets;
u64_stats_update_end(&tstats->syncp);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_lstats_add(struct net_device *dev, unsigned int len)
{
struct pcpu_lstats *lstats = ({ do { const void *__vpp_verify = (typeof((dev->lstats) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(dev->lstats)) *)(dev->lstats)); (typeof((typeof(*(dev->lstats)) *)(dev->lstats))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); });

u64_stats_update_begin(&lstats->syncp);
u64_stats_add(&lstats->bytes, len);
u64_stats_inc(&lstats->packets);
u64_stats_update_end(&lstats->syncp);
}
# 2663 "./include/linux/netdevice.h"
enum netdev_lag_tx_type {
NETDEV_LAG_TX_TYPE_UNKNOWN,
NETDEV_LAG_TX_TYPE_RANDOM,
NETDEV_LAG_TX_TYPE_BROADCAST,
NETDEV_LAG_TX_TYPE_ROUNDROBIN,
NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
NETDEV_LAG_TX_TYPE_HASH,
};

enum netdev_lag_hash {
NETDEV_LAG_HASH_NONE,
NETDEV_LAG_HASH_L2,
NETDEV_LAG_HASH_L34,
NETDEV_LAG_HASH_L23,
NETDEV_LAG_HASH_E23,
NETDEV_LAG_HASH_E34,
NETDEV_LAG_HASH_VLAN_SRCMAC,
NETDEV_LAG_HASH_UNKNOWN,
};

struct netdev_lag_upper_info {
enum netdev_lag_tx_type tx_type;
enum netdev_lag_hash hash_type;
};

struct netdev_lag_lower_state_info {
u8 link_up : 1,
tx_enabled : 1;
};







enum netdev_cmd {
NETDEV_UP = 1,
NETDEV_DOWN,
NETDEV_REBOOT,



NETDEV_CHANGE,
NETDEV_REGISTER,
NETDEV_UNREGISTER,
NETDEV_CHANGEMTU,
NETDEV_CHANGEADDR,
NETDEV_PRE_CHANGEADDR,
NETDEV_GOING_DOWN,
NETDEV_CHANGENAME,
NETDEV_FEAT_CHANGE,
NETDEV_BONDING_FAILOVER,
NETDEV_PRE_UP,
NETDEV_PRE_TYPE_CHANGE,
NETDEV_POST_TYPE_CHANGE,
NETDEV_POST_INIT,
NETDEV_RELEASE,
NETDEV_NOTIFY_PEERS,
NETDEV_JOIN,
NETDEV_CHANGEUPPER,
NETDEV_RESEND_IGMP,
NETDEV_PRECHANGEMTU,
NETDEV_CHANGEINFODATA,
NETDEV_BONDING_INFO,
NETDEV_PRECHANGEUPPER,
NETDEV_CHANGELOWERSTATE,
NETDEV_UDP_TUNNEL_PUSH_INFO,
NETDEV_UDP_TUNNEL_DROP_INFO,
NETDEV_CHANGE_TX_QUEUE_LEN,
NETDEV_CVLAN_FILTER_PUSH_INFO,
NETDEV_CVLAN_FILTER_DROP_INFO,
NETDEV_SVLAN_FILTER_PUSH_INFO,
NETDEV_SVLAN_FILTER_DROP_INFO,
NETDEV_OFFLOAD_XSTATS_ENABLE,
NETDEV_OFFLOAD_XSTATS_DISABLE,
NETDEV_OFFLOAD_XSTATS_REPORT_USED,
NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
};
const char *netdev_cmd_to_name(enum netdev_cmd cmd);

int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
int unregister_netdevice_notifier_net(struct net *net,
struct notifier_block *nb);
int register_netdevice_notifier_dev_net(struct net_device *dev,
struct notifier_block *nb,
struct netdev_net_notifier *nn);
int unregister_netdevice_notifier_dev_net(struct net_device *dev,
struct notifier_block *nb,
struct netdev_net_notifier *nn);

struct netdev_notifier_info {
struct net_device *dev;
struct netlink_ext_ack *extack;
};

struct netdev_notifier_info_ext {
struct netdev_notifier_info info;
union {
u32 mtu;
} ext;
};

struct netdev_notifier_change_info {
struct netdev_notifier_info info;
unsigned int flags_changed;
};

struct netdev_notifier_changeupper_info {
struct netdev_notifier_info info;
struct net_device *upper_dev;
bool master;
bool linking;
void *upper_info;
};

struct netdev_notifier_changelowerstate_info {
struct netdev_notifier_info info;
void *lower_state_info;
};

struct netdev_notifier_pre_changeaddr_info {
struct netdev_notifier_info info;
const unsigned char *dev_addr;
};

enum netdev_offload_xstats_type {
NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1,
};

struct netdev_notifier_offload_xstats_info {
struct netdev_notifier_info info;
enum netdev_offload_xstats_type type;

union {

struct netdev_notifier_offload_xstats_rd *report_delta;

struct netdev_notifier_offload_xstats_ru *report_used;
};
};

int netdev_offload_xstats_enable(struct net_device *dev,
enum netdev_offload_xstats_type type,
struct netlink_ext_ack *extack);
int netdev_offload_xstats_disable(struct net_device *dev,
enum netdev_offload_xstats_type type);
bool netdev_offload_xstats_enabled(const struct net_device *dev,
enum netdev_offload_xstats_type type);
int netdev_offload_xstats_get(struct net_device *dev,
enum netdev_offload_xstats_type type,
struct rtnl_hw_stats64 *stats, bool *used,
struct netlink_ext_ack *extack);
void
netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd,
const struct rtnl_hw_stats64 *stats);
void
netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru);
void netdev_offload_xstats_push_delta(struct net_device *dev,
enum netdev_offload_xstats_type type,
const struct rtnl_hw_stats64 *stats);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netdev_notifier_info_init(struct netdev_notifier_info *info,
struct net_device *dev)
{
info->dev = dev;
info->extack = ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
return info->dev;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct netlink_ext_ack *
netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
{
return info->extack;
}

int call_netdevice_notifiers(unsigned long val, struct net_device *dev);


extern rwlock_t dev_base_lock;
# 2871 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net_device *next_net_device(struct net_device *dev)
{
struct list_head *lh;
struct net *net;

net = dev_net(dev);
lh = dev->dev_list.next;
return lh == &net->dev_base_head ? ((void *)0) : ({ void *__mptr = (void *)(lh); _Static_assert(__builtin_types_compatible_p(typeof(*(lh)), typeof(((struct net_device *)0)->dev_list)) || __builtin_types_compatible_p(typeof(*(lh)), typeof(void)), "pointer type mismatch in container_of()"); ((struct net_device *)(__mptr - __builtin_offsetof(struct net_device, dev_list))); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net_device *next_net_device_rcu(struct net_device *dev)
{
struct list_head *lh;
struct net *net;

net = dev_net(dev);
lh = ({ typeof(*((*((struct list_head **)(&(&dev->dev_list)->next))))) *__UNIQUE_ID_rcu293 = (typeof(*((*((struct list_head **)(&(&dev->dev_list)->next))))) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_294(void) ; if (!((sizeof(((*((struct list_head **)(&(&dev->dev_list)->next))))) == sizeof(char) || sizeof(((*((struct list_head **)(&(&dev->dev_list)->next))))) == sizeof(short) || sizeof(((*((struct list_head **)(&(&dev->dev_list)->next))))) == sizeof(int) || sizeof(((*((struct list_head **)(&(&dev->dev_list)->next))))) == sizeof(long)) || sizeof(((*((struct list_head **)(&(&dev->dev_list)->next))))) == sizeof(long long))) __compiletime_assert_294(); } while (0); (*(const volatile typeof( _Generic((((*((struct list_head **)(&(&dev->dev_list)->next))))), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (((*((struct list_head **)(&(&dev->dev_list)->next))))))) *)&(((*((struct list_head **)(&(&dev->dev_list)->next)))))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*((*((struct list_head **)(&(&dev->dev_list)->next))))) *)(__UNIQUE_ID_rcu293)); });
return lh == &net->dev_base_head ? ((void *)0) : ({ void *__mptr = (void *)(lh); _Static_assert(__builtin_types_compatible_p(typeof(*(lh)), typeof(((struct net_device *)0)->dev_list)) || __builtin_types_compatible_p(typeof(*(lh)), typeof(void)), "pointer type mismatch in container_of()"); ((struct net_device *)(__mptr - __builtin_offsetof(struct net_device, dev_list))); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net_device *first_net_device(struct net *net)
{
return list_empty(&net->dev_base_head) ? ((void *)0) :
({ void *__mptr = (void *)(net->dev_base_head.next); _Static_assert(__builtin_types_compatible_p(typeof(*(net->dev_base_head.next)), typeof(((struct net_device *)0)->dev_list)) || __builtin_types_compatible_p(typeof(*(net->dev_base_head.next)), typeof(void)), "pointer type mismatch in container_of()"); ((struct net_device *)(__mptr - __builtin_offsetof(struct net_device, dev_list))); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net_device *first_net_device_rcu(struct net *net)
{
struct list_head *lh = ({ typeof(*((*((struct list_head **)(&(&net->dev_base_head)->next))))) *__UNIQUE_ID_rcu295 = (typeof(*((*((struct list_head **)(&(&net->dev_base_head)->next))))) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_296(void) ; if (!((sizeof(((*((struct list_head **)(&(&net->dev_base_head)->next))))) == sizeof(char) || sizeof(((*((struct list_head **)(&(&net->dev_base_head)->next))))) == sizeof(short) || sizeof(((*((struct list_head **)(&(&net->dev_base_head)->next))))) == sizeof(int) || sizeof(((*((struct list_head **)(&(&net->dev_base_head)->next))))) == sizeof(long)) || sizeof(((*((struct list_head **)(&(&net->dev_base_head)->next))))) == sizeof(long long))) __compiletime_assert_296(); } while (0); (*(const volatile typeof( _Generic((((*((struct list_head **)(&(&net->dev_base_head)->next))))), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (((*((struct list_head **)(&(&net->dev_base_head)->next))))))) *)&(((*((struct list_head **)(&(&net->dev_base_head)->next)))))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*((*((struct list_head **)(&(&net->dev_base_head)->next))))) *)(__UNIQUE_ID_rcu295)); });

return lh == &net->dev_base_head ? ((void *)0) : ({ void *__mptr = (void *)(lh); _Static_assert(__builtin_types_compatible_p(typeof(*(lh)), typeof(((struct net_device *)0)->dev_list)) || __builtin_types_compatible_p(typeof(*(lh)), typeof(void)), "pointer type mismatch in container_of()"); ((struct net_device *)(__mptr - __builtin_offsetof(struct net_device, dev_list))); });
}

int netdev_boot_setup_check(struct net_device *dev);
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
const char *hwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
void dev_add_pack(struct packet_type *pt);
void dev_remove_pack(struct packet_type *pt);
void __dev_remove_pack(struct packet_type *pt);
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);

int dev_get_iflink(const struct net_device *dev);
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
struct net_device_path_stack *stack);
struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
bool netdev_name_in_use(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
void dev_close(struct net_device *dev);
void dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);

int dev_queue_xmit(struct sk_buff *skb);
int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);

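/* Unlike dev_queue_xmit(), a failed direct transmit consumes the skb
 * here; callers must not retry with the same buffer. */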
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
{
int ret;

ret = __dev_direct_xmit(skb, queue_id);
if (!dev_xmit_complete(ret))
kfree_skb(skb);
return ret;
}

int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void unregister_netdevice(struct net_device *dev)
{
unregister_netdevice_queue(dev, ((void *)0));
}

int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
void netdev_freemem(struct net_device *dev);
int init_dummy_netdev(struct net_device *dev);

struct net_device *netdev_get_xmit_slave(struct net_device *dev,
struct sk_buff *skb,
bool all_slaves);
struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
struct sock *sk);
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
unsigned int len)
{
if (!dev->header_ops || !dev->header_ops->create)
return 0;

return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dev_parse_header(const struct sk_buff *skb,
unsigned char *haddr)
{
const struct net_device *dev = skb->dev;

if (!dev->header_ops || !dev->header_ops->parse)
return 0;
return dev->header_ops->parse(skb, haddr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be16 dev_parse_header_protocol(const struct sk_buff *skb)
{
const struct net_device *dev = skb->dev;

if (!dev->header_ops || !dev->header_ops->parse_protocol)
return 0;
return dev->header_ops->parse_protocol(skb);
}


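/* Capability 17 is CAP_SYS_RAWIO in the unexpanded source: privileged
 * callers may send a short link-layer header, which is zero-padded to
 * hard_header_len instead of being rejected. */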
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dev_validate_header(const struct net_device *dev,
char *ll_header, int len)
{
if (__builtin_expect(!!(len >= dev->hard_header_len), 1))
return true;
if (len < dev->min_header_len)
return false;

if (capable(17)) {
memset(ll_header + len, 0, dev->hard_header_len - len);
return true;
}

if (dev->header_ops && dev->header_ops->validate)
return dev->header_ops->validate(ll_header, len);

return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dev_has_header(const struct net_device *dev)
{
return dev->header_ops && dev->header_ops->create;
}



struct sd_flow_limit {
u64 count;
unsigned int num_buckets;
unsigned int history_head;
u16 history[(1 << 7)];
u8 buckets[];
};

extern int netdev_flow_limit_table_len;





struct softnet_data {
struct list_head poll_list;
struct sk_buff_head process_queue;


unsigned int processed;
unsigned int time_squeeze;
unsigned int received_rps;

struct softnet_data *rps_ipi_list;


struct sd_flow_limit *flow_limit;

struct Qdisc *output_queue;
struct Qdisc **output_queue_tailp;
struct sk_buff *completion_queue;




struct {
u16 recursion;
u8 more;
} xmit;




unsigned int input_queue_head __attribute__((__aligned__((1 << 6))));


call_single_data_t csd __attribute__((__aligned__((1 << 6))));
struct softnet_data *rps_ipi_next;
unsigned int cpu;
unsigned int input_queue_tail;

unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;

};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void input_queue_head_incr(struct softnet_data *sd)
{

sd->input_queue_head++;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void input_queue_tail_incr_save(struct softnet_data *sd,
unsigned int *qtail)
{

*qtail = ++sd->input_queue_tail;

}

extern __attribute__((section(".data..percpu" "..shared_aligned"))) __typeof__(struct softnet_data) softnet_data __attribute__((__aligned__((1 << 6))));

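/* The body below is the expansion of
 * this_cpu_read(softnet_data.xmit.recursion); the sizeof() switch picks
 * the access matching the field width (recursion is a u16, so case 2 is
 * the live branch). */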
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dev_recursion_level(void)
{
return ({ typeof(softnet_data.xmit.recursion) pscr_ret__; do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(softnet_data.xmit.recursion)) { case 1: pscr_ret__ = ({ typeof(softnet_data.xmit.recursion) __ret; if ((sizeof(softnet_data.xmit.recursion) == sizeof(char) || sizeof(softnet_data.xmit.recursion) == sizeof(short) || sizeof(softnet_data.xmit.recursion) == sizeof(int) || sizeof(softnet_data.xmit.recursion) == sizeof(long))) __ret = ({ typeof(softnet_data.xmit.recursion) ___ret; do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); ___ret = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_297(void) ; if (!((sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(char) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(short) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(int) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(long)) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(long long))) __compiletime_assert_297(); } while (0); (*(const volatile typeof( _Generic((*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info 
*)get_current())->cpu)])))); }); })), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })))) *)&(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }))); }); do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); ___ret; }); else __ret = ({ typeof(softnet_data.xmit.recursion) ___ret; unsigned long ___flags; do { ({ unsigned long __dummy; typeof(___flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); ___flags = arch_local_irq_save(); } while (0); ___ret = ({ *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); do { ({ unsigned long __dummy; typeof(___flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(___flags); } while (0); ___ret; }); __ret; }); break; case 2: pscr_ret__ = ({ typeof(softnet_data.xmit.recursion) __ret; if ((sizeof(softnet_data.xmit.recursion) == sizeof(char) || sizeof(softnet_data.xmit.recursion) == sizeof(short) || sizeof(softnet_data.xmit.recursion) == sizeof(int) || sizeof(softnet_data.xmit.recursion) == sizeof(long))) __ret = ({ typeof(softnet_data.xmit.recursion) ___ret; do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); ___ret = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_298(void) ; if (!((sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(char) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) 
*)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(short) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(int) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(long)) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(long long))) __compiletime_assert_298(); } while (0); (*(const volatile typeof( _Generic((*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })))) *)&(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }))); }); do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); ___ret; }); else __ret = ({ typeof(softnet_data.xmit.recursion) ___ret; unsigned long ___flags; do { ({ unsigned long __dummy; typeof(___flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); ___flags = 
arch_local_irq_save(); } while (0); ___ret = ({ *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); do { ({ unsigned long __dummy; typeof(___flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(___flags); } while (0); ___ret; }); __ret; }); break; case 4: pscr_ret__ = ({ typeof(softnet_data.xmit.recursion) __ret; if ((sizeof(softnet_data.xmit.recursion) == sizeof(char) || sizeof(softnet_data.xmit.recursion) == sizeof(short) || sizeof(softnet_data.xmit.recursion) == sizeof(int) || sizeof(softnet_data.xmit.recursion) == sizeof(long))) __ret = ({ typeof(softnet_data.xmit.recursion) ___ret; do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); ___ret = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_299(void) ; if (!((sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(char) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(short) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(int) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(long)) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(long long))) __compiletime_assert_299(); } while (0); (*(const 
volatile typeof( _Generic((*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })))) *)&(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }))); }); do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); ___ret; }); else __ret = ({ typeof(softnet_data.xmit.recursion) ___ret; unsigned long ___flags; do { ({ unsigned long __dummy; typeof(___flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); ___flags = arch_local_irq_save(); } while (0); ___ret = ({ *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); do { ({ unsigned long __dummy; typeof(___flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(___flags); } while (0); ___ret; }); __ret; }); break; case 8: pscr_ret__ = ({ typeof(softnet_data.xmit.recursion) __ret; if ((sizeof(softnet_data.xmit.recursion) == sizeof(char) || sizeof(softnet_data.xmit.recursion) == sizeof(short) || sizeof(softnet_data.xmit.recursion) == sizeof(int) || sizeof(softnet_data.xmit.recursion) == sizeof(long))) __ret = ({ typeof(softnet_data.xmit.recursion) ___ret; do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); ___ret = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_300(void) ; if (!((sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + 
(((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(char) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(short) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(int) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(long)) || sizeof(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })) == sizeof(long long))) __compiletime_assert_300(); } while (0); (*(const volatile typeof( _Generic((*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); })))) *)&(*({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) 
*)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }))); }); do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); ___ret; }); else __ret = ({ typeof(softnet_data.xmit.recursion) ___ret; unsigned long ___flags; do { ({ unsigned long __dummy; typeof(___flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); ___flags = arch_local_irq_save(); } while (0); ___ret = ({ *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); do { ({ unsigned long __dummy; typeof(___flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(___flags); } while (0); ___ret; }); __ret; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; });
}


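/* Per-CPU transmit recursion guard: dev_xmit_recursion() trips once the
 * depth exceeds 8 (XMIT_RECURSION_LIMIT), and the _inc()/_dec() helpers
 * bracket nested dev_queue_xmit() calls. */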
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dev_xmit_recursion(void)
{
return __builtin_expect(!!(({ __this_cpu_preempt_check("read"); ({ typeof(softnet_data.xmit.recursion) pscr_ret__; do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(softnet_data.xmit.recursion)) { case 1: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; case 2: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; case 4: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; case 8: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; }); }) > 8), 0);

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_xmit_recursion_inc(void)
{
({ __this_cpu_preempt_check("add"); do { do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(softnet_data.xmit.recursion)) { case 1: do { *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0);break; case 2: do { *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0);break; case 4: do { *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0);break; case 8: do { *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0);break; default: __bad_size_call_parameter();break; } } while (0); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_xmit_recursion_dec(void)
{
({ __this_cpu_preempt_check("add"); do { do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(softnet_data.xmit.recursion)) { case 1: do { *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(softnet_data.xmit.recursion))(1); } while (0);break; case 2: do { *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(softnet_data.xmit.recursion))(1); } while (0);break; case 4: do { *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(softnet_data.xmit.recursion))(1); } while (0);break; case 8: do { *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion))); (typeof((typeof(*(&(softnet_data.xmit.recursion))) *)(&(softnet_data.xmit.recursion)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(softnet_data.xmit.recursion))(1); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); });
}
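
/*
 * The two statement-expressions above are most likely the preprocessed
 * bodies of dev_xmit_recursion_inc()/dev_xmit_recursion_dec(), i.e. (assumed
 * unexpanded form):
 *
 *     __this_cpu_inc(softnet_data.xmit.recursion);
 *     __this_cpu_dec(softnet_data.xmit.recursion);
 *
 * The switch on sizeof() picks a per-size accessor, __vpp_verify type-checks
 * the per-cpu expression, and adding __per_cpu_offset[...] relocates the
 * address into this CPU's per-cpu area; ((struct thread_info *)get_current())->cpu
 * is the RISC-V expansion of raw_smp_processor_id().
 */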

void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_tx_schedule_all(struct net_device *dev)
{
unsigned int i;

for (i = 0; i < dev->num_tx_queues; i++)
netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_start_queue(struct net_device *dev)
{
netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_tx_start_all_queues(struct net_device *dev)
{
unsigned int i;

for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
netif_tx_start_queue(txq);
}
}

void netif_tx_wake_queue(struct netdev_queue *dev_queue);
# 3174 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_wake_queue(struct net_device *dev)
{
netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_tx_wake_all_queues(struct net_device *dev)
{
unsigned int i;

for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
netif_tx_wake_queue(txq);
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
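
/*
 * netif_tx_start_queue()/netif_tx_stop_queue() toggle the driver-owned XOFF
 * bit with atomic bitops. A minimal sketch of the usual driver pairing
 * (my_ring_has_room() is a hypothetical helper, not part of this header):
 *
 *     // in ndo_start_xmit(), after posting the descriptor:
 *     if (unlikely(!my_ring_has_room(ring)))
 *             netif_tx_stop_queue(txq);        // stack stops feeding us
 *
 *     // in the TX-completion handler, once descriptors are reclaimed:
 *     if (netif_tx_queue_stopped(txq) && my_ring_has_room(ring))
 *             netif_tx_wake_queue(txq);        // reschedules the qdisc
 */
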
# 3201 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_stop_queue(struct net_device *dev)
{
netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

void netif_tx_stop_all_queues(struct net_device *dev);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
return arch_test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_queue_stopped(const struct net_device *dev)
{
return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
return dev_queue->state & ((1 << __QUEUE_STATE_DRV_XOFF) | (1 << __QUEUE_STATE_STACK_XOFF));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
return dev_queue->state & (((1 << __QUEUE_STATE_DRV_XOFF) | (1 << __QUEUE_STATE_STACK_XOFF)) | (1 << __QUEUE_STATE_FROZEN));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
{
return dev_queue->state & ((1 << __QUEUE_STATE_DRV_XOFF) | (1 << __QUEUE_STATE_FROZEN));
}
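
/*
 * The literal masks above are presumably the expansions of the
 * QUEUE_STATE_ANY_XOFF / QUEUE_STATE_ANY_XOFF_OR_FROZEN /
 * QUEUE_STATE_DRV_XOFF_OR_FROZEN convenience macros: DRV_XOFF is set by the
 * driver, STACK_XOFF by byte queue limits (BQL), and FROZEN while the qdisc
 * layer has the queue suspended, so each predicate answers "may this caller
 * transmit?" against a different combination of those bits.
 */
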
# 3251 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue,
unsigned int min_limit)
{

dev_queue->dql.min_limit = min_limit;

}
# 3266 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{

__builtin_prefetch(&dev_queue->dql.num_queued,1);

}
# 3280 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
{

__builtin_prefetch(&dev_queue->dql.limit,1);

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
unsigned int bytes)
{

dql_queued(&dev_queue->dql, bytes);

if (__builtin_expect(!!(dql_avail(&dev_queue->dql) >= 0), 1))
return;

set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);






do { do { } while (0); __asm__ __volatile__ ("fence " "rw" "," "rw" : : : "memory"); } while (0);


if (__builtin_expect(!!(dql_avail(&dev_queue->dql) >= 0), 0))
clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

}
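
/*
 * netdev_tx_sent_queue() is the BQL enqueue side. The inline
 * "fence rw,rw" is the RISC-V expansion of smp_mb(): it orders setting
 * __QUEUE_STATE_STACK_XOFF against the second dql_avail() check, so a
 * completion running concurrently on another CPU cannot complete all
 * in-flight bytes without either seeing the bit or this CPU seeing the new
 * budget; the unlikely() re-check then undoes a stop that lost that race.
 */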







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
unsigned int bytes,
bool xmit_more)
{
if (xmit_more) {

dql_queued(&dev_queue->dql, bytes);

return netif_tx_queue_stopped(dev_queue);
}
netdev_tx_sent_queue(dev_queue, bytes);
return true;
}
# 3340 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __netdev_sent_queue(struct net_device *dev,
unsigned int bytes,
bool xmit_more)
{
return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
xmit_more);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
unsigned int pkts, unsigned int bytes)
{

if (__builtin_expect(!!(!bytes), 0))
return;

dql_completed(&dev_queue->dql, bytes);






do { do { } while (0); __asm__ __volatile__ ("fence " "rw" "," "rw" : : : "memory"); } while (0);

if (__builtin_expect(!!(dql_avail(&dev_queue->dql) < 0), 0))
return;

if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
netif_schedule_queue(dev_queue);

}
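
/*
 * A minimal sketch of how the BQL pair is used from a driver (hypothetical
 * call sites; pkts_done/bytes_done are whatever the TX reclaim loop counted):
 *
 *     // ndo_start_xmit():
 *     netdev_tx_sent_queue(txq, skb->len);
 *
 *     // TX-completion handler:
 *     netdev_tx_completed_queue(txq, pkts_done, bytes_done);
 *
 * The smp_mb() here pairs with the one in netdev_tx_sent_queue(), and
 * test_and_clear_bit() guarantees the queue is rescheduled at most once.
 */
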
# 3387 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netdev_completed_queue(struct net_device *dev,
unsigned int pkts, unsigned int bytes)
{
netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netdev_tx_reset_queue(struct netdev_queue *q)
{

clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
dql_reset(&q->dql);

}
# 3408 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netdev_reset_queue(struct net_device *dev_queue)
{
netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}
# 3421 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
if (__builtin_expect(!!(queue_index >= dev->real_num_tx_queues), 0)) {
do { if (net_ratelimit()) ({ do {} while (0); _printk("\001" "4" "IPv6: " "%s selects TX queue %d, but real number of TX queues is %d\n", dev->name, queue_index, dev->real_num_tx_queues); }); } while (0);


return 0;
}

return queue_index;
}
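
/*
 * The _printk() call above is almost certainly a net_warn_ratelimited()
 * expansion: "\001" "4" is the KERN_SOH prefix plus loglevel '4'
 * (KERN_WARNING), and the "IPv6: " prefix comes from the pr_fmt() defined by
 * net/ipv6/route.c, the translation unit this preprocessed file was made from.
 */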







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_running(const struct net_device *dev)
{
return arch_test_bit(__LINK_STATE_START, &dev->state);
}
# 3458 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

netif_tx_start_queue(txq);
}
# 3472 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
netif_tx_stop_queue(txq);
}
# 3485 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __netif_subqueue_stopped(const struct net_device *dev,
u16 queue_index)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

return netif_tx_queue_stopped(txq);
}
# 3500 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_subqueue_stopped(const struct net_device *dev,
struct sk_buff *skb)
{
return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
# 3513 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

netif_tx_wake_queue(txq);
}


int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
u16 index);
int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
u16 index, enum xps_map_type type);
# 3534 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_attr_test_mask(unsigned long j,
const unsigned long *mask,
unsigned int nr_bits)
{
cpu_max_bits_warn(j, nr_bits);
return arch_test_bit(j, mask);
}
# 3550 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_attr_test_online(unsigned long j,
const unsigned long *online_mask,
unsigned int nr_bits)
{
cpu_max_bits_warn(j, nr_bits);

if (online_mask)
return arch_test_bit(j, online_mask);

return (j < nr_bits);
}
# 3570 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
unsigned int nr_bits)
{

if (n != -1)
cpu_max_bits_warn(n, nr_bits);

if (srcp)
return find_next_bit(srcp, nr_bits, n + 1);

return n + 1;
}
# 3592 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int netif_attrmask_next_and(int n, const unsigned long *src1p,
const unsigned long *src2p,
unsigned int nr_bits)
{

if (n != -1)
cpu_max_bits_warn(n, nr_bits);

if (src1p && src2p)
return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
else if (src1p)
return find_next_bit(src1p, nr_bits, n + 1);
else if (src2p)
return find_next_bit(src2p, nr_bits, n + 1);

return n + 1;
}
# 3631 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_multiqueue(const struct net_device *dev)
{
return dev->num_tx_queues > 1;
}

int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);


int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
# 3648 "./include/linux/netdevice.h"
int netif_set_real_num_queues(struct net_device *dev,
unsigned int txq, unsigned int rxq);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct netdev_rx_queue *
__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
{
return dev->_rx + rxq;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int get_netdev_rx_queue_index(
struct netdev_rx_queue *queue)
{
struct net_device *dev = queue->dev;
int index = queue - dev->_rx;

do { if (__builtin_expect(!!(index >= dev->num_rx_queues), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/netdevice.h"), "i" (3664), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
return index;
}
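
/*
 * The asm block in get_netdev_rx_queue_index() is the RISC-V expansion of
 * BUG_ON(index >= dev->num_rx_queues): "ebreak" traps into the kernel, the
 * .pushsection __bug_table entry records the file and line
 * ("include/linux/netdevice.h", 3664) for the trap handler's report, and
 * __builtin_unreachable() tells the compiler control never returns.
 */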


int netif_get_num_default_rss_queues(void);

enum skb_free_reason {
SKB_REASON_CONSUMED,
SKB_REASON_DROPPED,
};

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
# 3698 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_kfree_skb_irq(struct sk_buff *skb)
{
__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_consume_skb_irq(struct sk_buff *skb)
{
__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_kfree_skb_any(struct sk_buff *skb)
{
__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_consume_skb_any(struct sk_buff *skb)
{
__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
}
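
/*
 * The four wrappers above differ only in the free reason they pass:
 * SKB_REASON_DROPPED marks the skb as lost (visible to the kernel's drop
 * monitor), while SKB_REASON_CONSUMED marks normal, successful disposal, so
 * drivers call the dev_consume_skb_*() variants on completion paths and the
 * dev_kfree_skb_*() variants on error paths.
 */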

u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog);
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
int netif_rx(struct sk_buff *skb);
int __netif_rx(struct sk_buff *skb);

int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
void netif_receive_skb_list_internal(struct list_head *head);
void netif_receive_skb_list(struct list_head *head);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void napi_free_frags(struct napi_struct *napi)
{
kfree_skb(napi->skb);
napi->skb = ((void *)0);
}

bool netdev_is_rx_handler_busy(struct net_device *dev);
int netdev_rx_handler_register(struct net_device *dev,
rx_handler_func_t *rx_handler,
void *rx_handler_data);
void netdev_rx_handler_unregister(struct net_device *dev);

bool dev_valid_name(const char *name);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_socket_ioctl_cmd(unsigned int cmd)
{
return (((cmd) >> (0 +8)) & ((1 << 8)-1)) == 0x89;
}
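
/*
 * Worked example for is_socket_ioctl_cmd(): (cmd >> 8) & 0xff extracts the
 * _IOC_TYPE field, and 0x89 is the socket ioctl type byte. For
 * SIOCGIFADDR == 0x8915, (0x8915 >> 8) & 0xff == 0x89, so it returns true;
 * a TTY ioctl such as 0x5401 yields 0x54 and returns false.
 */
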
int get_user_ifreq(struct ifreq *ifr, void **ifrdata, void *arg);
int put_user_ifreq(struct ifreq *ifr, void *arg);
int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
void *data, bool *need_copyout);
int dev_ifconf(struct net *net, struct ifconf *ifc);
int dev_ethtool(struct net *net, struct ifreq *ifr, void *userdata);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack);
int dev_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack);
void __dev_notify_flags(struct net_device *, unsigned int old_flags,
unsigned int gchanges);
int dev_change_name(struct net_device *, const char *);
int dev_set_alias(struct net_device *, const char *, size_t);
int dev_get_alias(const struct net_device *, char *, size_t);
int __dev_change_net_namespace(struct net_device *dev, struct net *net,
const char *pat, int new_ifindex);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
int dev_change_net_namespace(struct net_device *dev, struct net *net,
const char *pat)
{
return __dev_change_net_namespace(dev, net, pat, 0);
}
int __dev_set_mtu(struct net_device *, int);
int dev_validate_mtu(struct net_device *dev, int mtu,
struct netlink_ext_ack *extack);
int dev_set_mtu_ext(struct net_device *dev, int mtu,
struct netlink_ext_ack *extack);
int dev_set_mtu(struct net_device *, int);
int dev_change_tx_queue_len(struct net_device *, unsigned long);
void dev_set_group(struct net_device *, int);
int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
struct netlink_ext_ack *extack);
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
struct netlink_ext_ack *extack);
int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
struct netlink_ext_ack *extack);
int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len);
int dev_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid, bool recurse);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
u32 value);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);

typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
int fd, int expected_fd, u32 flags);
int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
u8 dev_xdp_prog_count(struct net_device *dev);
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(const struct net_device *dev,
const struct sk_buff *skb);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool __is_skb_forwardable(const struct net_device *dev,
const struct sk_buff *skb,
const bool check_mtu)
{
const u32 vlan_hdr_len = 4;
unsigned int len;

if (!(dev->flags & IFF_UP))
return false;

if (!check_mtu)
return true;

len = dev->mtu + dev->hard_header_len + vlan_hdr_len;
if (skb->len <= len)
return true;




if (skb_is_gso(skb))
return true;

return false;
}

struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net_device_core_stats *dev_core_stats(struct net_device *dev)
{

struct net_device_core_stats *p = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_301(void) ; if (!((sizeof(dev->core_stats) == sizeof(char) || sizeof(dev->core_stats) == sizeof(short) || sizeof(dev->core_stats) == sizeof(int) || sizeof(dev->core_stats) == sizeof(long)) || sizeof(dev->core_stats) == sizeof(long long))) __compiletime_assert_301(); } while (0); (*(const volatile typeof( _Generic((dev->core_stats), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (dev->core_stats))) *)&(dev->core_stats)); });

if (__builtin_expect(!!(p), 1))
return p;

return netdev_core_stats_alloc(dev);
}
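
/*
 * The statement-expression initializing 'p' above is a
 * READ_ONCE(dev->core_stats) expansion: compiletime_assert_301() rejects
 * non-scalar sizes at build time, and the _Generic() volatile load forces a
 * single, tear-free read, so the NULL check and the returned pointer are
 * based on the same snapshot even against a concurrent
 * netdev_core_stats_alloc().
 */
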
# 3868 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_core_stats_rx_dropped_inc(struct net_device *dev) { struct net_device_core_stats *p; p = dev_core_stats(dev); if (p) do { do { const void *__vpp_verify = (typeof((&(p->rx_dropped)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(p->rx_dropped)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(p->rx_dropped)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(p->rx_dropped))) *)(&(p->rx_dropped))); (typeof((typeof(*(&(p->rx_dropped))) *)(&(p->rx_dropped)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(p->rx_dropped)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(p->rx_dropped))) *)(&(p->rx_dropped))); (typeof((typeof(*(&(p->rx_dropped))) *)(&(p->rx_dropped)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(p->rx_dropped)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(p->rx_dropped))) *)(&(p->rx_dropped))); (typeof((typeof(*(&(p->rx_dropped))) *)(&(p->rx_dropped)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(p->rx_dropped)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(p->rx_dropped))) *)(&(p->rx_dropped))); (typeof((typeof(*(&(p->rx_dropped))) *)(&(p->rx_dropped)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_core_stats_tx_dropped_inc(struct net_device *dev) { struct net_device_core_stats *p; p = dev_core_stats(dev); if (p) do { do { const void *__vpp_verify = (typeof((&(p->tx_dropped)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(p->tx_dropped)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(p->tx_dropped)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(p->tx_dropped))) *)(&(p->tx_dropped))); (typeof((typeof(*(&(p->tx_dropped))) *)(&(p->tx_dropped)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(p->tx_dropped)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(p->tx_dropped))) *)(&(p->tx_dropped))); (typeof((typeof(*(&(p->tx_dropped))) *)(&(p->tx_dropped)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(p->tx_dropped)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(p->tx_dropped))) *)(&(p->tx_dropped))); (typeof((typeof(*(&(p->tx_dropped))) *)(&(p->tx_dropped)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(p->tx_dropped)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(p->tx_dropped))) *)(&(p->tx_dropped))); (typeof((typeof(*(&(p->tx_dropped))) *)(&(p->tx_dropped)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_core_stats_rx_nohandler_inc(struct net_device *dev) { struct net_device_core_stats *p; p = dev_core_stats(dev); if (p) do { do { const void *__vpp_verify = (typeof((&(p->rx_nohandler)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(p->rx_nohandler)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(p->rx_nohandler)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(p->rx_nohandler))) *)(&(p->rx_nohandler))); (typeof((typeof(*(&(p->rx_nohandler))) *)(&(p->rx_nohandler)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(p->rx_nohandler)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(p->rx_nohandler))) *)(&(p->rx_nohandler))); (typeof((typeof(*(&(p->rx_nohandler))) *)(&(p->rx_nohandler)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(p->rx_nohandler)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(p->rx_nohandler))) *)(&(p->rx_nohandler))); (typeof((typeof(*(&(p->rx_nohandler))) *)(&(p->rx_nohandler)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(p->rx_nohandler)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(p->rx_nohandler))) *)(&(p->rx_nohandler))); (typeof((typeof(*(&(p->rx_nohandler))) *)(&(p->rx_nohandler)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); }
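
/*
 * The three one-line functions above look machine-generated (presumably a
 * DEV_CORE_STATS_INC()-style macro in the unexpanded header); each body is
 * this_cpu_inc(p->rx_dropped / tx_dropped / rx_nohandler) expanded with the
 * generic fallback RISC-V uses: arch_local_irq_save()/restore() around a
 * plain increment of the __per_cpu_offset-relocated slot, since the
 * architecture has no cheaper interrupt-safe per-cpu add.
 */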

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int ____dev_forward_skb(struct net_device *dev,
struct sk_buff *skb,
const bool check_mtu)
{
if (skb_orphan_frags(skb, ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u))) ||
__builtin_expect(!!(!__is_skb_forwardable(dev, skb, check_mtu)), 0)) {
dev_core_stats_rx_dropped_inc(dev);
kfree_skb(skb);
return 1;
}

skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
skb->priority = 0;
return 0;
}

bool dev_nit_active(struct net_device *dev);
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);

extern int netdev_budget;
extern unsigned int netdev_budget_usecs;


void netdev_run_todo(void);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __dev_put(struct net_device *dev)
{
if (dev) {

do { do { const void *__vpp_verify = (typeof((&(*dev->pcpu_refcnt)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(*dev->pcpu_refcnt)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*dev->pcpu_refcnt)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*dev->pcpu_refcnt))) *)(&(*dev->pcpu_refcnt))); (typeof((typeof(*(&(*dev->pcpu_refcnt))) *)(&(*dev->pcpu_refcnt)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(*dev->pcpu_refcnt))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*dev->pcpu_refcnt)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*dev->pcpu_refcnt))) *)(&(*dev->pcpu_refcnt))); (typeof((typeof(*(&(*dev->pcpu_refcnt))) *)(&(*dev->pcpu_refcnt)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(*dev->pcpu_refcnt))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*dev->pcpu_refcnt)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*dev->pcpu_refcnt))) *)(&(*dev->pcpu_refcnt))); (typeof((typeof(*(&(*dev->pcpu_refcnt))) *)(&(*dev->pcpu_refcnt)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(*dev->pcpu_refcnt))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*dev->pcpu_refcnt)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*dev->pcpu_refcnt))) *)(&(*dev->pcpu_refcnt))); (typeof((typeof(*(&(*dev->pcpu_refcnt))) *)(&(*dev->pcpu_refcnt)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(*dev->pcpu_refcnt))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);



}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __dev_hold(struct net_device *dev)
{
if (dev) {

do { do { const void *__vpp_verify = (typeof((&(*dev->pcpu_refcnt)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(*dev->pcpu_refcnt)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*dev->pcpu_refcnt)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*dev->pcpu_refcnt))) *)(&(*dev->pcpu_refcnt))); (typeof((typeof(*(&(*dev->pcpu_refcnt))) *)(&(*dev->pcpu_refcnt)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*dev->pcpu_refcnt)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*dev->pcpu_refcnt))) *)(&(*dev->pcpu_refcnt))); (typeof((typeof(*(&(*dev->pcpu_refcnt))) *)(&(*dev->pcpu_refcnt)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*dev->pcpu_refcnt)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*dev->pcpu_refcnt))) *)(&(*dev->pcpu_refcnt))); (typeof((typeof(*(&(*dev->pcpu_refcnt))) *)(&(*dev->pcpu_refcnt)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*dev->pcpu_refcnt)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*dev->pcpu_refcnt))) *)(&(*dev->pcpu_refcnt))); (typeof((typeof(*(&(*dev->pcpu_refcnt))) *)(&(*dev->pcpu_refcnt)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);



}
}
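
/*
 * __dev_put()/__dev_hold() adjust the net_device reference count, which on
 * this configuration is a per-CPU counter (*dev->pcpu_refcnt): each CPU
 * increments or decrements only its own slot under local_irq_save(), and the
 * real count is presumably only summed across CPUs at unregister time, which
 * is why no atomic operation or barrier appears here.
 */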

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __netdev_tracker_alloc(struct net_device *dev,
netdevice_tracker *tracker,
gfp_t gfp)
{



}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netdev_tracker_alloc(struct net_device *dev,
netdevice_tracker *tracker, gfp_t gfp)
{




}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netdev_tracker_free(struct net_device *dev,
netdevice_tracker *tracker)
{



}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_hold_track(struct net_device *dev,
netdevice_tracker *tracker, gfp_t gfp)
{
if (dev) {
__dev_hold(dev);
__netdev_tracker_alloc(dev, tracker, gfp);
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_put_track(struct net_device *dev,
netdevice_tracker *tracker)
{
if (dev) {
netdev_tracker_free(dev, tracker);
__dev_put(dev);
}
}
# 3973 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_hold(struct net_device *dev)
{
dev_hold_track(dev, ((void *)0), ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)));
}
# 3985 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_put(struct net_device *dev)
{
dev_put_track(dev, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_replace_track(struct net_device *odev,
struct net_device *ndev,
netdevice_tracker *tracker,
gfp_t gfp)
{
if (odev)
netdev_tracker_free(odev, tracker);

__dev_hold(ndev);
__dev_put(odev);

if (ndev)
__netdev_tracker_alloc(ndev, tracker, gfp);
}
# 4014 "./include/linux/netdevice.h"
void linkwatch_init_dev(struct net_device *dev);
void linkwatch_fire_event(struct net_device *dev);
void linkwatch_forget_dev(struct net_device *dev);







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_carrier_ok(const struct net_device *dev)
{
return !arch_test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

unsigned long dev_trans_start(struct net_device *dev);

void __netdev_watchdog_up(struct net_device *dev);

void netif_carrier_on(struct net_device *dev);
void netif_carrier_off(struct net_device *dev);
void netif_carrier_event(struct net_device *dev);
# 4049 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_dormant_on(struct net_device *dev)
{
if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
linkwatch_fire_event(dev);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_dormant_off(struct net_device *dev)
{
if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
linkwatch_fire_event(dev);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_dormant(const struct net_device *dev)
{
return arch_test_bit(__LINK_STATE_DORMANT, &dev->state);
}
# 4089 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_testing_on(struct net_device *dev)
{
if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
linkwatch_fire_event(dev);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_testing_off(struct net_device *dev)
{
if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state))
linkwatch_fire_event(dev);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_testing(const struct net_device *dev)
{
return arch_test_bit(__LINK_STATE_TESTING, &dev->state);
}
# 4125 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_oper_up(const struct net_device *dev)
{
return (dev->operstate == IF_OPER_UP ||
dev->operstate == IF_OPER_UNKNOWN );
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_device_present(const struct net_device *dev)
{
return arch_test_bit(__LINK_STATE_PRESENT, &dev->state);
}

void netif_device_detach(struct net_device *dev);

void netif_device_attach(struct net_device *dev);





enum {
NETIF_MSG_DRV_BIT,
NETIF_MSG_PROBE_BIT,
NETIF_MSG_LINK_BIT,
NETIF_MSG_TIMER_BIT,
NETIF_MSG_IFDOWN_BIT,
NETIF_MSG_IFUP_BIT,
NETIF_MSG_RX_ERR_BIT,
NETIF_MSG_TX_ERR_BIT,
NETIF_MSG_TX_QUEUED_BIT,
NETIF_MSG_INTR_BIT,
NETIF_MSG_TX_DONE_BIT,
NETIF_MSG_RX_STATUS_BIT,
NETIF_MSG_PKTDATA_BIT,
NETIF_MSG_HW_BIT,
NETIF_MSG_WOL_BIT,




NETIF_MSG_CLASS_COUNT,
};

_Static_assert(NETIF_MSG_CLASS_COUNT <= 32, "NETIF_MSG_CLASS_COUNT <= 32");
# 4210 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{

if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
return default_msg_enable_bits;
if (debug_value == 0)
return 0;

return (1U << debug_value) - 1;
}
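
/*
 * Worked example for netif_msg_init(): debug_value enables that many message
 * classes from the low end, so netif_msg_init(3, defaults) returns
 * (1U << 3) - 1 == 0x7, i.e. the DRV, PROBE and LINK classes from the enum
 * above; 0 silences everything, and anything outside 0..31 falls back to the
 * driver's default_msg_enable_bits.
 */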

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
spin_lock(&txq->_xmit_lock);

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_302(void) ; if (!((sizeof(txq->xmit_lock_owner) == sizeof(char) || sizeof(txq->xmit_lock_owner) == sizeof(short) || sizeof(txq->xmit_lock_owner) == sizeof(int) || sizeof(txq->xmit_lock_owner) == sizeof(long)) || sizeof(txq->xmit_lock_owner) == sizeof(long long))) __compiletime_assert_302(); } while (0); do { *(volatile typeof(txq->xmit_lock_owner) *)&(txq->xmit_lock_owner) = (cpu); } while (0); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __netif_tx_acquire(struct netdev_queue *txq)
{
(void)0;
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __netif_tx_release(struct netdev_queue *txq)
{
(void)0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_303(void) ; if (!((sizeof(txq->xmit_lock_owner) == sizeof(char) || sizeof(txq->xmit_lock_owner) == sizeof(short) || sizeof(txq->xmit_lock_owner) == sizeof(int) || sizeof(txq->xmit_lock_owner) == sizeof(long)) || sizeof(txq->xmit_lock_owner) == sizeof(long long))) __compiletime_assert_303(); } while (0); do { *(volatile typeof(txq->xmit_lock_owner) *)&(txq->xmit_lock_owner) = ((((struct thread_info *)get_current())->cpu)); } while (0); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __netif_tx_trylock(struct netdev_queue *txq)
{
bool ok = spin_trylock(&txq->_xmit_lock);

if (__builtin_expect(!!(ok), 1)) {

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_304(void) ; if (!((sizeof(txq->xmit_lock_owner) == sizeof(char) || sizeof(txq->xmit_lock_owner) == sizeof(short) || sizeof(txq->xmit_lock_owner) == sizeof(int) || sizeof(txq->xmit_lock_owner) == sizeof(long)) || sizeof(txq->xmit_lock_owner) == sizeof(long long))) __compiletime_assert_304(); } while (0); do { *(volatile typeof(txq->xmit_lock_owner) *)&(txq->xmit_lock_owner) = ((((struct thread_info *)get_current())->cpu)); } while (0); } while (0);
}
return ok;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __netif_tx_unlock(struct netdev_queue *txq)
{

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_305(void) ; if (!((sizeof(txq->xmit_lock_owner) == sizeof(char) || sizeof(txq->xmit_lock_owner) == sizeof(short) || sizeof(txq->xmit_lock_owner) == sizeof(int) || sizeof(txq->xmit_lock_owner) == sizeof(long)) || sizeof(txq->xmit_lock_owner) == sizeof(long long))) __compiletime_assert_305(); } while (0); do { *(volatile typeof(txq->xmit_lock_owner) *)&(txq->xmit_lock_owner) = (-1); } while (0); } while (0);
spin_unlock(&txq->_xmit_lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __netif_tx_unlock_bh(struct netdev_queue *txq)
{

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_306(void) ; if (!((sizeof(txq->xmit_lock_owner) == sizeof(char) || sizeof(txq->xmit_lock_owner) == sizeof(short) || sizeof(txq->xmit_lock_owner) == sizeof(int) || sizeof(txq->xmit_lock_owner) == sizeof(long)) || sizeof(txq->xmit_lock_owner) == sizeof(long long))) __compiletime_assert_306(); } while (0); do { *(volatile typeof(txq->xmit_lock_owner) *)&(txq->xmit_lock_owner) = (-1); } while (0); } while (0);
spin_unlock_bh(&txq->_xmit_lock);
}
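
/*
 * Each compiletime_assert_30x block in the lock helpers above is likely a
 * WRITE_ONCE(txq->xmit_lock_owner, <cpu or -1>) expansion: the owner field
 * is read without the lock held (see txq_trans_update() below), and the
 * volatile single store keeps that racing access tear-free and, presumably,
 * quiets KCSAN.
 */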




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void txq_trans_update(struct netdev_queue *txq)
{
if (txq->xmit_lock_owner != -1)
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_307(void) ; if (!((sizeof(txq->trans_start) == sizeof(char) || sizeof(txq->trans_start) == sizeof(short) || sizeof(txq->trans_start) == sizeof(int) || sizeof(txq->trans_start) == sizeof(long)) || sizeof(txq->trans_start) == sizeof(long long))) __compiletime_assert_307(); } while (0); do { *(volatile typeof(txq->trans_start) *)&(txq->trans_start) = (jiffies); } while (0); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void txq_trans_cond_update(struct netdev_queue *txq)
{
unsigned long now = jiffies;

if (({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_308(void) ; if (!((sizeof(txq->trans_start) == sizeof(char) || sizeof(txq->trans_start) == sizeof(short) || sizeof(txq->trans_start) == sizeof(int) || sizeof(txq->trans_start) == sizeof(long)) || sizeof(txq->trans_start) == sizeof(long long))) __compiletime_assert_308(); } while (0); (*(const volatile typeof( _Generic((txq->trans_start), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (txq->trans_start))) *)&(txq->trans_start)); }) != now)
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_309(void) ; if (!((sizeof(txq->trans_start) == sizeof(char) || sizeof(txq->trans_start) == sizeof(short) || sizeof(txq->trans_start) == sizeof(int) || sizeof(txq->trans_start) == sizeof(long)) || sizeof(txq->trans_start) == sizeof(long long))) __compiletime_assert_309(); } while (0); do { *(volatile typeof(txq->trans_start) *)&(txq->trans_start) = (now); } while (0); } while (0);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_trans_update(struct net_device *dev)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

txq_trans_cond_update(txq);
}







void netif_tx_lock(struct net_device *dev);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_tx_lock_bh(struct net_device *dev)
{
local_bh_disable();
netif_tx_lock(dev);
}

void netif_tx_unlock(struct net_device *dev);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_tx_unlock_bh(struct net_device *dev)
{
netif_tx_unlock(dev);
local_bh_enable();
}
# 4339 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_tx_disable(struct net_device *dev)
{
unsigned int i;
int cpu;

local_bh_disable();
cpu = (((struct thread_info *)get_current())->cpu);
spin_lock(&dev->tx_global_lock);
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

__netif_tx_lock(txq, cpu);
netif_tx_stop_queue(txq);
__netif_tx_unlock(txq);
}
spin_unlock(&dev->tx_global_lock);
local_bh_enable();
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_addr_lock(struct net_device *dev)
{
unsigned char nest_level = 0;


nest_level = dev->nested_level;

do { _raw_spin_lock_nested(spinlock_check(&dev->addr_list_lock), nest_level); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_addr_lock_bh(struct net_device *dev)
{
unsigned char nest_level = 0;


nest_level = dev->nested_level;

local_bh_disable();
do { _raw_spin_lock_nested(spinlock_check(&dev->addr_list_lock), nest_level); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_addr_unlock(struct net_device *dev)
{
spin_unlock(&dev->addr_list_lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_addr_unlock_bh(struct net_device *dev)
{
spin_unlock_bh(&dev->addr_list_lock);
}
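
/*
 * netif_addr_lock()/netif_addr_lock_bh() expand
 * spin_lock_nested(&dev->addr_list_lock, dev->nested_level): the per-device
 * nesting level keeps lockdep from flagging false deadlocks when stacked
 * devices (e.g. a VLAN on top of a bond) take a lower device's address-list
 * lock while already holding their own.
 */
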
# 4398 "./include/linux/netdevice.h"
void ether_setup(struct net_device *dev);


struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *),
unsigned int txqs, unsigned int rxqs);







int register_netdev(struct net_device *dev);
void unregister_netdev(struct net_device *dev);

int devm_register_netdev(struct device *dev, struct net_device *ndev);


int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list, int addr_len);
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list, int addr_len);
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
struct net_device *dev,
int (*sync)(struct net_device *, const unsigned char *),
int (*unsync)(struct net_device *,
const unsigned char *));
int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
struct net_device *dev,
int (*sync)(struct net_device *,
const unsigned char *, int),
int (*unsync)(struct net_device *,
const unsigned char *, int));
void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
struct net_device *dev,
int (*unsync)(struct net_device *,
const unsigned char *, int));
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
struct net_device *dev,
int (*unsync)(struct net_device *,
const unsigned char *));
void __hw_addr_init(struct netdev_hw_addr_list *list);


void dev_addr_mod(struct net_device *dev, unsigned int offset,
const void *addr, size_t len);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
__dev_addr_set(struct net_device *dev, const void *addr, size_t len)
{
dev_addr_mod(dev, 0, addr, len);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dev_addr_set(struct net_device *dev, const u8 *addr)
{
__dev_addr_set(dev, addr, dev->addr_len);
}

int dev_addr_add(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);
void dev_addr_check(struct net_device *dev);


int dev_uc_add(struct net_device *dev, const unsigned char *addr);
int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_uc_del(struct net_device *dev, const unsigned char *addr);
int dev_uc_sync(struct net_device *to, struct net_device *from);
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_uc_unsync(struct net_device *to, struct net_device *from);
void dev_uc_flush(struct net_device *dev);
void dev_uc_init(struct net_device *dev);
# 4485 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __dev_uc_sync(struct net_device *dev,
int (*sync)(struct net_device *,
const unsigned char *),
int (*unsync)(struct net_device *,
const unsigned char *))
{
return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
}
# 4501 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __dev_uc_unsync(struct net_device *dev,
int (*unsync)(struct net_device *,
const unsigned char *))
{
__hw_addr_unsync_dev(&dev->uc, dev, unsync);
}


int dev_mc_add(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
int dev_mc_del(struct net_device *dev, const unsigned char *addr);
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
int dev_mc_sync(struct net_device *to, struct net_device *from);
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
void dev_mc_unsync(struct net_device *to, struct net_device *from);
void dev_mc_flush(struct net_device *dev);
void dev_mc_init(struct net_device *dev);
# 4529 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __dev_mc_sync(struct net_device *dev,
int (*sync)(struct net_device *,
const unsigned char *),
int (*unsync)(struct net_device *,
const unsigned char *))
{
return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
}
# 4545 "./include/linux/netdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __dev_mc_unsync(struct net_device *dev,
int (*unsync)(struct net_device *,
const unsigned char *))
{
__hw_addr_unsync_dev(&dev->mc, dev, unsync);
}


void dev_set_rx_mode(struct net_device *dev);
void __dev_set_rx_mode(struct net_device *dev);
int dev_set_promiscuity(struct net_device *dev, int inc);
int dev_set_allmulti(struct net_device *dev, int inc);
void netdev_state_change(struct net_device *dev);
void __netdev_notify_peers(struct net_device *dev);
void netdev_notify_peers(struct net_device *dev);
void netdev_features_change(struct net_device *dev);

void dev_load(struct net *net, const char *name);
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *storage);
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
const struct net_device_stats *netdev_stats);
void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
const struct pcpu_sw_netstats *netstats);
void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);

extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
extern int netdev_unregister_timeout_secs;
extern int weight_p;
extern int dev_weight_rx_bias;
extern int dev_weight_tx_bias;
extern int dev_rx_weight;
extern int dev_tx_weight;
extern int gro_normal_batch;

enum {
NESTED_SYNC_IMM_BIT,
NESTED_SYNC_TODO_BIT,
};







struct netdev_nested_priv {
unsigned char flags;
void *data;
};

bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
struct list_head **iter);
# 4608 "./include/linux/netdevice.h"
int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
int (*fn)(struct net_device *upper_dev,
struct netdev_nested_priv *priv),
struct netdev_nested_priv *priv);

bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
struct net_device *upper_dev);

bool netdev_has_any_upper_dev(struct net_device *dev);

void *netdev_lower_get_next_private(struct net_device *dev,
struct list_head **iter);
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
struct list_head **iter);
# 4635 "./include/linux/netdevice.h"
void *netdev_lower_get_next(struct net_device *dev,
struct list_head **iter);







struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
struct list_head **iter);
int netdev_walk_all_lower_dev(struct net_device *dev,
int (*fn)(struct net_device *lower_dev,
struct netdev_nested_priv *priv),
struct netdev_nested_priv *priv);
int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
int (*fn)(struct net_device *lower_dev,
struct netdev_nested_priv *priv),
struct netdev_nested_priv *priv);

void *netdev_adjacent_get_private(struct list_head *adj_list);
void *netdev_lower_get_first_private_rcu(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
struct netlink_ext_ack *extack);
int netdev_master_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev,
void *upper_priv, void *upper_info,
struct netlink_ext_ack *extack);
void netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev);
int netdev_adjacent_change_prepare(struct net_device *old_dev,
struct net_device *new_dev,
struct net_device *dev,
struct netlink_ext_ack *extack);
void netdev_adjacent_change_commit(struct net_device *old_dev,
struct net_device *new_dev,
struct net_device *dev);
void netdev_adjacent_change_abort(struct net_device *old_dev,
struct net_device *new_dev,
struct net_device *dev);
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev);
void netdev_lower_state_changed(struct net_device *lower_dev,
void *lower_state_info);



extern u8 netdev_rss_key[52];
void netdev_rss_key_fill(void *buffer, size_t len);

int skb_checksum_help(struct sk_buff *skb);
int skb_crc32c_csum_help(struct sk_buff *skb);
int skb_csum_hwoffload_help(struct sk_buff *skb,
const netdev_features_t features);

struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
netdev_features_t features, bool tx_path);
struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
netdev_features_t features, __be16 type);
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
netdev_features_t features);

struct netdev_bonding_info {
ifslave slave;
ifbond master;
};

struct netdev_notifier_bonding_info {
struct netdev_notifier_info info;
struct netdev_bonding_info bonding_info;
};

void netdev_bonding_info_change(struct net_device *dev,
struct netdev_bonding_info *bonding_info);


void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data);







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
return __skb_gso_segment(skb, features, true);
}
__be16 skb_network_protocol(struct sk_buff *skb, int *depth);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool can_checksum_protocol(netdev_features_t features,
__be16 protocol)
{
if (protocol == (( __be16)(__builtin_constant_p((__u16)((0x8906))) ? ((__u16)( (((__u16)((0x8906)) & (__u16)0x00ffU) << 8) | (((__u16)((0x8906)) & (__u16)0xff00U) >> 8))) : __fswab16((0x8906)))))
return !!(features & ((netdev_features_t)1 << (NETIF_F_FCOE_CRC_BIT)));



if (features & ((netdev_features_t)1 << (NETIF_F_HW_CSUM_BIT))) {

return true;
}

switch (protocol) {
case (( __be16)(__builtin_constant_p((__u16)((0x0800))) ? ((__u16)( (((__u16)((0x0800)) & (__u16)0x00ffU) << 8) | (((__u16)((0x0800)) & (__u16)0xff00U) >> 8))) : __fswab16((0x0800)))):
return !!(features & ((netdev_features_t)1 << (NETIF_F_IP_CSUM_BIT)));
case (( __be16)(__builtin_constant_p((__u16)((0x86DD))) ? ((__u16)( (((__u16)((0x86DD)) & (__u16)0x00ffU) << 8) | (((__u16)((0x86DD)) & (__u16)0xff00U) >> 8))) : __fswab16((0x86DD)))):
return !!(features & ((netdev_features_t)1 << (NETIF_F_IPV6_CSUM_BIT)));
default:
return false;
}
}
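/*
 * The __builtin_constant_p/__fswab16 expressions in
 * can_checksum_protocol() appear to be expansions of htons() on a
 * little-endian target; the constants are likely ETH_P_FCOE (0x8906),
 * ETH_P_IP (0x0800) and ETH_P_IPV6 (0x86DD), i.e. each case in the
 * unexpanded body compares "protocol == htons(ETH_P_...)".
 */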


void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);







void net_enable_timestamp(void);
void net_disable_timestamp(void);


int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) dev_proc_init(void);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
struct sk_buff *skb, struct net_device *dev,
bool more)
{
({ __this_cpu_preempt_check("write"); do { do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.more)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(softnet_data.xmit.more)) { case 1: do { *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.more)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.more))) *)(&(softnet_data.xmit.more))); (typeof((typeof(*(&(softnet_data.xmit.more))) *)(&(softnet_data.xmit.more)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) = more; } while (0);break; case 2: do { *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.more)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.more))) *)(&(softnet_data.xmit.more))); (typeof((typeof(*(&(softnet_data.xmit.more))) *)(&(softnet_data.xmit.more)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) = more; } while (0);break; case 4: do { *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.more)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.more))) *)(&(softnet_data.xmit.more))); (typeof((typeof(*(&(softnet_data.xmit.more))) *)(&(softnet_data.xmit.more)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) = more; } while (0);break; case 8: do { *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.more)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.more))) *)(&(softnet_data.xmit.more))); (typeof((typeof(*(&(softnet_data.xmit.more))) *)(&(softnet_data.xmit.more)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) = more; } while (0);break; default: __bad_size_call_parameter();break; } } while (0); });
return ops->ndo_start_xmit(skb, dev);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netdev_xmit_more(void)
{
return ({ __this_cpu_preempt_check("read"); ({ typeof(softnet_data.xmit.more) pscr_ret__; do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.more)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(softnet_data.xmit.more)) { case 1: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.more)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.more))) *)(&(softnet_data.xmit.more))); (typeof((typeof(*(&(softnet_data.xmit.more))) *)(&(softnet_data.xmit.more)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; case 2: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.more)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.more))) *)(&(softnet_data.xmit.more))); (typeof((typeof(*(&(softnet_data.xmit.more))) *)(&(softnet_data.xmit.more)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; case 4: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.more)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.more))) *)(&(softnet_data.xmit.more))); (typeof((typeof(*(&(softnet_data.xmit.more))) *)(&(softnet_data.xmit.more)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; case 8: pscr_ret__ = ({ *({ do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.more)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(softnet_data.xmit.more))) *)(&(softnet_data.xmit.more))); (typeof((typeof(*(&(softnet_data.xmit.more))) *)(&(softnet_data.xmit.more)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; }); });
}
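/*
 * The large per-cpu accessor blocks above are presumably the
 * expansions of the this-cpu helpers; unexpanded, the two functions
 * would read roughly:
 *
 *     __this_cpu_write(softnet_data.xmit.more, more);
 *     return ops->ndo_start_xmit(skb, dev);
 *
 * and
 *
 *     return __this_cpu_read(softnet_data.xmit.more);
 *
 * The switch over sizeof() picks a same-size access, and the
 * __per_cpu_offset[] indexing relocates the address into the current
 * CPU's per-cpu area.
 */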

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, bool more)
{
const struct net_device_ops *ops = dev->netdev_ops;
netdev_tx_t rc;

rc = __netdev_start_xmit(ops, skb, dev, more);
if (rc == NETDEV_TX_OK)
txq_trans_update(txq);

return rc;
}

int netdev_class_create_file_ns(const struct class_attribute *class_attr,
const void *ns);
void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
const void *ns);

extern const struct kobj_ns_type_operations net_ns_type_operations;

const char *netdev_drivername(const struct net_device *dev);

void linkwatch_run_queue(void);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) netdev_features_t netdev_intersect_features(netdev_features_t f1,
netdev_features_t f2)
{
if ((f1 ^ f2) & ((netdev_features_t)1 << (NETIF_F_HW_CSUM_BIT))) {
if (f1 & ((netdev_features_t)1 << (NETIF_F_HW_CSUM_BIT)))
f1 |= (((netdev_features_t)1 << (NETIF_F_IP_CSUM_BIT))|((netdev_features_t)1 << (NETIF_F_IPV6_CSUM_BIT)));
else
f2 |= (((netdev_features_t)1 << (NETIF_F_IP_CSUM_BIT))|((netdev_features_t)1 << (NETIF_F_IPV6_CSUM_BIT)));
}

return f1 & f2;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) netdev_features_t netdev_get_wanted_features(
struct net_device *dev)
{
return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
netdev_features_t one, netdev_features_t mask);





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) netdev_features_t netdev_add_tso_features(netdev_features_t features,
netdev_features_t mask)
{
return netdev_increment_features(features, (((netdev_features_t)1 << (NETIF_F_TSO_BIT)) | ((netdev_features_t)1 << (NETIF_F_TSO6_BIT)) | ((netdev_features_t)1 << (NETIF_F_TSO_ECN_BIT)) | ((netdev_features_t)1 << (NETIF_F_TSO_MANGLEID_BIT))), mask);
}

int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);

void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);

netdev_features_t passthru_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
netdev_features_t netif_skb_features(struct sk_buff *skb);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool net_gso_ok(netdev_features_t features, int gso_type)
{
netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;


do { __attribute__((__noreturn__)) extern void __compiletime_assert_310(void) ; if (!(!(SKB_GSO_TCPV4 != (((netdev_features_t)1 << (NETIF_F_TSO_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_310(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_311(void) ; if (!(!(SKB_GSO_DODGY != (((netdev_features_t)1 << (NETIF_F_GSO_ROBUST_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_311(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_312(void) ; if (!(!(SKB_GSO_TCP_ECN != (((netdev_features_t)1 << (NETIF_F_TSO_ECN_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_312(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_313(void) ; if (!(!(SKB_GSO_TCP_FIXEDID != (((netdev_features_t)1 << (NETIF_F_TSO_MANGLEID_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_313(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_314(void) ; if (!(!(SKB_GSO_TCPV6 != (((netdev_features_t)1 << (NETIF_F_TSO6_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_314(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_315(void) ; if (!(!(SKB_GSO_FCOE != (((netdev_features_t)1 << (NETIF_F_FSO_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_315(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_316(void) ; if (!(!(SKB_GSO_GRE != (((netdev_features_t)1 << (NETIF_F_GSO_GRE_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_316(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_317(void) ; if (!(!(SKB_GSO_GRE_CSUM != (((netdev_features_t)1 << (NETIF_F_GSO_GRE_CSUM_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_317(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_318(void) ; if (!(!(SKB_GSO_IPXIP4 != (((netdev_features_t)1 << (NETIF_F_GSO_IPXIP4_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_318(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_319(void) ; if (!(!(SKB_GSO_IPXIP6 != (((netdev_features_t)1 << (NETIF_F_GSO_IPXIP6_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_319(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_320(void) ; if (!(!(SKB_GSO_UDP_TUNNEL != (((netdev_features_t)1 << (NETIF_F_GSO_UDP_TUNNEL_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_320(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_321(void) ; if (!(!(SKB_GSO_UDP_TUNNEL_CSUM != (((netdev_features_t)1 << (NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_321(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_322(void) ; if (!(!(SKB_GSO_PARTIAL != (((netdev_features_t)1 << (NETIF_F_GSO_PARTIAL_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_322(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_323(void) ; if (!(!(SKB_GSO_TUNNEL_REMCSUM != (((netdev_features_t)1 << (NETIF_F_GSO_TUNNEL_REMCSUM_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_323(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_324(void) ; if (!(!(SKB_GSO_SCTP != (((netdev_features_t)1 << (NETIF_F_GSO_SCTP_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_324(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_325(void) ; if (!(!(SKB_GSO_ESP != (((netdev_features_t)1 << (NETIF_F_GSO_ESP_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_325(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_326(void) ; if (!(!(SKB_GSO_UDP != (((netdev_features_t)1 << (NETIF_F_GSO_UDP_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_326(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_327(void) ; if (!(!(SKB_GSO_UDP_L4 != (((netdev_features_t)1 << (NETIF_F_GSO_UDP_L4_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_327(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_328(void) ; if (!(!(SKB_GSO_FRAGLIST != (((netdev_features_t)1 << (NETIF_F_GSO_FRAGLIST_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_328(); } while (0);

return (features & feature) == feature;
}
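/*
 * Each "__compiletime_assert_NNN" do/while above is presumably an
 * expanded BUILD_BUG_ON() of the form
 *
 *     BUILD_BUG_ON(SKB_GSO_x != (NETIF_F_x >> NETIF_F_GSO_SHIFT));
 *
 * verifying at compile time that the SKB_GSO_* flags stay in sync
 * with the corresponding NETIF_F_* feature bits.
 */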

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
return net_gso_ok(features, ((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_type) &&
(!skb_has_frag_list(skb) || (features & ((netdev_features_t)1 << (NETIF_F_FRAGLIST_BIT))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_needs_gso(struct sk_buff *skb,
netdev_features_t features)
{
return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
__builtin_expect(!!((skb->ip_summed != 3) && (skb->ip_summed != 1)), 0));

}
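/*
 * In netif_needs_gso(), the literals 3 and 1 are most likely
 * CHECKSUM_PARTIAL and CHECKSUM_UNNECESSARY, i.e. the unexpanded
 * condition is
 * unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
 *          (skb->ip_summed != CHECKSUM_UNNECESSARY)).
 */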

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_set_gso_max_size(struct net_device *dev,
unsigned int size)
{

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_329(void) ; if (!((sizeof(dev->gso_max_size) == sizeof(char) || sizeof(dev->gso_max_size) == sizeof(short) || sizeof(dev->gso_max_size) == sizeof(int) || sizeof(dev->gso_max_size) == sizeof(long)) || sizeof(dev->gso_max_size) == sizeof(long long))) __compiletime_assert_329(); } while (0); do { *(volatile typeof(dev->gso_max_size) *)&(dev->gso_max_size) = (size); } while (0); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_set_gso_max_segs(struct net_device *dev,
unsigned int segs)
{

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_330(void) ; if (!((sizeof(dev->gso_max_segs) == sizeof(char) || sizeof(dev->gso_max_segs) == sizeof(short) || sizeof(dev->gso_max_segs) == sizeof(int) || sizeof(dev->gso_max_segs) == sizeof(long)) || sizeof(dev->gso_max_segs) == sizeof(long long))) __compiletime_assert_330(); } while (0); do { *(volatile typeof(dev->gso_max_segs) *)&(dev->gso_max_segs) = (segs); } while (0); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_set_gro_max_size(struct net_device *dev,
unsigned int size)
{

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_331(void) ; if (!((sizeof(dev->gro_max_size) == sizeof(char) || sizeof(dev->gro_max_size) == sizeof(short) || sizeof(dev->gro_max_size) == sizeof(int) || sizeof(dev->gro_max_size) == sizeof(long)) || sizeof(dev->gro_max_size) == sizeof(long long))) __compiletime_assert_331(); } while (0); do { *(volatile typeof(dev->gro_max_size) *)&(dev->gro_max_size) = (size); } while (0); } while (0);
}
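/*
 * The three setters above share one pattern: a compile-time size
 * check followed by a volatile store, presumably the expansion of
 * WRITE_ONCE(), e.g.
 *
 *     WRITE_ONCE(dev->gso_max_size, size);
 *
 * so that lockless readers observe a single, untorn store.
 */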

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
int pulled_hlen, u16 mac_offset,
int mac_len)
{
skb->protocol = protocol;
skb->encapsulation = 1;
skb_push(skb, pulled_hlen);
skb_reset_transport_header(skb);
skb->mac_header = mac_offset;
skb->network_header = skb->mac_header + mac_len;
skb->mac_len = mac_len;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_macsec(const struct net_device *dev)
{
return dev->priv_flags & IFF_MACSEC;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_macvlan(const struct net_device *dev)
{
return dev->priv_flags & IFF_MACVLAN;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_macvlan_port(const struct net_device *dev)
{
return dev->priv_flags & IFF_MACVLAN_PORT;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_bond_master(const struct net_device *dev)
{
return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_bond_slave(const struct net_device *dev)
{
return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_supports_nofcs(struct net_device *dev)
{
return dev->priv_flags & IFF_SUPP_NOFCS;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_has_l3_rx_handler(const struct net_device *dev)
{
return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_l3_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_L3MDEV_MASTER;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_l3_slave(const struct net_device *dev)
{
return dev->priv_flags & IFF_L3MDEV_SLAVE;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_bridge_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_EBRIDGE;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_bridge_port(const struct net_device *dev)
{
return dev->priv_flags & IFF_BRIDGE_PORT;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_ovs_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_OPENVSWITCH;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_ovs_port(const struct net_device *dev)
{
return dev->priv_flags & IFF_OVS_DATAPATH;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_any_bridge_port(const struct net_device *dev)
{
return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_team_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_TEAM;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_team_port(const struct net_device *dev)
{
return dev->priv_flags & IFF_TEAM_PORT;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_lag_master(const struct net_device *dev)
{
return netif_is_bond_master(dev) || netif_is_team_master(dev);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_lag_port(const struct net_device *dev)
{
return netif_is_bond_slave(dev) || netif_is_team_port(dev);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_rxfh_configured(const struct net_device *dev)
{
return dev->priv_flags & IFF_RXFH_CONFIGURED;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_failover(const struct net_device *dev)
{
return dev->priv_flags & IFF_FAILOVER;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_is_failover_slave(const struct net_device *dev)
{
return dev->priv_flags & IFF_FAILOVER_SLAVE;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void netif_keep_dst(struct net_device *dev)
{
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_reduces_vlan_mtu(struct net_device *dev)
{

return netif_is_macsec(dev);
}

extern struct pernet_operations loopback_net_ops;





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *netdev_name(const struct net_device *dev)
{
if (!dev->name[0] || strchr(dev->name, '%'))
return "(unnamed net_device)";
return dev->name;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netdev_unregistering(const struct net_device *dev)
{
return dev->reg_state == NETREG_UNREGISTERING;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *netdev_reg_state(const struct net_device *dev)
{
switch (dev->reg_state) {
case NETREG_UNINITIALIZED: return " (uninitialized)";
case NETREG_REGISTERED: return "";
case NETREG_UNREGISTERING: return " (unregistering)";
case NETREG_UNREGISTERED: return " (unregistered)";
case NETREG_RELEASED: return " (released)";
case NETREG_DUMMY: return " (dummy)";
}

({ static bool __attribute__((__section__(".data.once"))) __already_done; bool __ret_do_once = !!(1); if (__builtin_expect(!!(__ret_do_once && !__already_done), 0)) { __already_done = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("%s: unknown reg_state %d\n", dev->name, dev->reg_state); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/netdevice.h"), "i" (5073), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_do_once), 0); });
return " (unknown)";
}
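/*
 * The one-line statement before the final return appears to be an
 * expanded WARN_ONCE(1, "%s: unknown reg_state %d\n", ...): the
 * __section__(".data.once") flag provides the "once" behaviour, and
 * the inline asm emits the RISC-V "ebreak" trap plus a __bug_table
 * entry recording the file ("include/linux/netdevice.h") and line
 * (5073) of the original WARN site.
 */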

__attribute__((__format__(printf, 3, 4))) __attribute__((__cold__))
void netdev_printk(const char *level, const struct net_device *dev,
const char *format, ...);
__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__))
void netdev_emerg(const struct net_device *dev, const char *format, ...);
__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__))
void netdev_alert(const struct net_device *dev, const char *format, ...);
__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__))
void netdev_crit(const struct net_device *dev, const char *format, ...);
__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__))
void netdev_err(const struct net_device *dev, const char *format, ...);
__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__))
void netdev_warn(const struct net_device *dev, const char *format, ...);
__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__))
void netdev_notice(const struct net_device *dev, const char *format, ...);
__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__))
void netdev_info(const struct net_device *dev, const char *format, ...);
# 5255 "./include/linux/netdevice.h"
extern struct list_head ptype_all;
extern struct list_head ptype_base[(16)];

extern struct net_device *blackhole_netdev;
# 35 "net/ipv6/route.c" 2

# 1 "./include/linux/mroute6.h" 1





# 1 "./include/linux/pim.h" 1
# 33 "./include/linux/pim.h"
enum {
PIM_TYPE_HELLO,
PIM_TYPE_REGISTER,
PIM_TYPE_REGISTER_STOP,
PIM_TYPE_JOIN_PRUNE,
PIM_TYPE_BOOTSTRAP,
PIM_TYPE_ASSERT,
PIM_TYPE_GRAFT,
PIM_TYPE_GRAFT_ACK,
PIM_TYPE_CANDIDATE_RP_ADV
};
# 55 "./include/linux/pim.h"
struct pimhdr {
__u8 type;
__u8 reserved;
__be16 csum;
};


struct pimreghdr {
__u8 type;
__u8 reserved;
__be16 csum;
__be32 flags;
};

int pim_rcv_v1(struct sk_buff *skb);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipmr_pimsm_enabled(void)
{
return 0 || 0;
}
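/*
 * "return 0 || 0;" is presumably the expansion of
 * IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2),
 * with both PIM-SM options disabled in this configuration.
 */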

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct pimhdr *pim_hdr(const struct sk_buff *skb)
{
return (struct pimhdr *)skb_transport_header(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u8 pim_hdr_version(const struct pimhdr *pimhdr)
{
return pimhdr->type >> 4;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u8 pim_hdr_type(const struct pimhdr *pimhdr)
{
return pimhdr->type & 0xf;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool pim_ipv4_all_pim_routers(__be32 addr)
{
return addr == (( __be32)(__builtin_constant_p((__u32)((0xE000000D))) ? ((__u32)( (((__u32)((0xE000000D)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xE000000D)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xE000000D)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xE000000D)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xE000000D))));
}
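/*
 * The byte-swap construct above looks like htonl(0xE000000D), i.e.
 * 224.0.0.13, the All-PIM-Routers multicast group; unexpanded, the
 * test compares addr against that well-known group address.
 */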
# 7 "./include/linux/mroute6.h" 2


# 1 "./include/uapi/linux/mroute6.h" 1
# 48 "./include/uapi/linux/mroute6.h"
typedef unsigned long mifbitmap_t;
typedef unsigned short mifi_t;






typedef __u32 if_mask;


typedef struct if_set {
if_mask ifs_bits[(((256) + ((sizeof(if_mask) * 8)) - 1) / ((sizeof(if_mask) * 8)))];
} if_set;
# 74 "./include/uapi/linux/mroute6.h"
struct mif6ctl {
mifi_t mif6c_mifi;
unsigned char mif6c_flags;
unsigned char vifc_threshold;
__u16 mif6c_pifi;
unsigned int vifc_rate_limit;
};







struct mf6cctl {
struct sockaddr_in6 mf6cc_origin;
struct sockaddr_in6 mf6cc_mcastgrp;
mifi_t mf6cc_parent;
struct if_set mf6cc_ifset;
};





struct sioc_sg_req6 {
struct sockaddr_in6 src;
struct sockaddr_in6 grp;
unsigned long pktcnt;
unsigned long bytecnt;
unsigned long wrong_if;
};





struct sioc_mif_req6 {
mifi_t mifi;
unsigned long icount;
unsigned long ocount;
unsigned long ibytes;
unsigned long obytes;
};
# 133 "./include/uapi/linux/mroute6.h"
struct mrt6msg {




__u8 im6_mbz;
__u8 im6_msgtype;
__u16 im6_mif;
__u32 im6_pad;
struct in6_addr im6_src, im6_dst;
};


enum {
IP6MRA_CREPORT_UNSPEC,
IP6MRA_CREPORT_MSGTYPE,
IP6MRA_CREPORT_MIF_ID,
IP6MRA_CREPORT_SRC_ADDR,
IP6MRA_CREPORT_DST_ADDR,
IP6MRA_CREPORT_PKT,
__IP6MRA_CREPORT_MAX
};
# 10 "./include/linux/mroute6.h" 2
# 1 "./include/linux/mroute_base.h" 1







# 1 "./include/net/sock.h" 1
# 41 "./include/net/sock.h"
# 1 "./include/linux/list_nulls.h" 1
# 21 "./include/linux/list_nulls.h"
struct hlist_nulls_head {
struct hlist_nulls_node *first;
};

struct hlist_nulls_node {
struct hlist_nulls_node *next, **pprev;
};
# 43 "./include/linux/list_nulls.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int is_a_nulls(const struct hlist_nulls_node *ptr)
{
return ((unsigned long)ptr & 1);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long get_nulls_value(const struct hlist_nulls_node *ptr)
{
return ((unsigned long)ptr) >> 1;
}
# 67 "./include/linux/list_nulls.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int hlist_nulls_unhashed(const struct hlist_nulls_node *h)
{
return !h->pprev;
}
# 81 "./include/linux/list_nulls.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int hlist_nulls_unhashed_lockless(const struct hlist_nulls_node *h)
{
return !({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_332(void) ; if (!((sizeof(h->pprev) == sizeof(char) || sizeof(h->pprev) == sizeof(short) || sizeof(h->pprev) == sizeof(int) || sizeof(h->pprev) == sizeof(long)) || sizeof(h->pprev) == sizeof(long long))) __compiletime_assert_332(); } while (0); (*(const volatile typeof( _Generic((h->pprev), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (h->pprev))) *)&(h->pprev)); });
}
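/*
 * The _Generic() construct above is presumably the expansion of
 * READ_ONCE(h->pprev): a compile-time size check followed by a
 * const-volatile load. The matching volatile-store pattern used in
 * the list functions below is the WRITE_ONCE() counterpart.
 */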

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int hlist_nulls_empty(const struct hlist_nulls_head *h)
{
return is_a_nulls(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_333(void) ; if (!((sizeof(h->first) == sizeof(char) || sizeof(h->first) == sizeof(short) || sizeof(h->first) == sizeof(int) || sizeof(h->first) == sizeof(long)) || sizeof(h->first) == sizeof(long long))) __compiletime_assert_333(); } while (0); (*(const volatile typeof( _Generic((h->first), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (h->first))) *)&(h->first)); }));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_nulls_add_head(struct hlist_nulls_node *n,
struct hlist_nulls_head *h)
{
struct hlist_nulls_node *first = h->first;

n->next = first;
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_334(void) ; if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_334(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (&h->first); } while (0); } while (0);
h->first = n;
if (!is_a_nulls(first))
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_335(void) ; if (!((sizeof(first->pprev) == sizeof(char) || sizeof(first->pprev) == sizeof(short) || sizeof(first->pprev) == sizeof(int) || sizeof(first->pprev) == sizeof(long)) || sizeof(first->pprev) == sizeof(long long))) __compiletime_assert_335(); } while (0); do { *(volatile typeof(first->pprev) *)&(first->pprev) = (&n->next); } while (0); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __hlist_nulls_del(struct hlist_nulls_node *n)
{
struct hlist_nulls_node *next = n->next;
struct hlist_nulls_node **pprev = n->pprev;

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_336(void) ; if (!((sizeof(*pprev) == sizeof(char) || sizeof(*pprev) == sizeof(short) || sizeof(*pprev) == sizeof(int) || sizeof(*pprev) == sizeof(long)) || sizeof(*pprev) == sizeof(long long))) __compiletime_assert_336(); } while (0); do { *(volatile typeof(*pprev) *)&(*pprev) = (next); } while (0); } while (0);
if (!is_a_nulls(next))
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_337(void) ; if (!((sizeof(next->pprev) == sizeof(char) || sizeof(next->pprev) == sizeof(short) || sizeof(next->pprev) == sizeof(int) || sizeof(next->pprev) == sizeof(long)) || sizeof(next->pprev) == sizeof(long long))) __compiletime_assert_337(); } while (0); do { *(volatile typeof(next->pprev) *)&(next->pprev) = (pprev); } while (0); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_nulls_del(struct hlist_nulls_node *n)
{
__hlist_nulls_del(n);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_338(void) ; if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_338(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (((void *) 0x122 + 0)); } while (0); } while (0);
}
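/*
 * "((void *) 0x122 + 0)" looks like LIST_POISON2 (0x122 +
 * POISON_POINTER_DELTA, with a zero delta here): unexpanded,
 * hlist_nulls_del() ends with WRITE_ONCE(n->pprev, LIST_POISON2) so
 * that a use-after-unhash dereference faults on a recognizable
 * address.
 */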
# 42 "./include/net/sock.h" 2
# 52 "./include/net/sock.h"
# 1 "./include/linux/page_counter.h" 1








struct page_counter {
atomic_long_t usage;
unsigned long min;
unsigned long low;
unsigned long high;
unsigned long max;


unsigned long emin;
atomic_long_t min_usage;
atomic_long_t children_min_usage;


unsigned long elow;
atomic_long_t low_usage;
atomic_long_t children_low_usage;


unsigned long watermark;
unsigned long failcnt;







struct page_counter *parent;
};







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_counter_init(struct page_counter *counter,
struct page_counter *parent)
{
atomic_long_set(&counter->usage, 0);
counter->max = (((long)(~0UL >> 1)) / ((1UL) << (12)));
counter->parent = parent;
}
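/*
 * "(((long)(~0UL >> 1)) / ((1UL) << (12)))" is LONG_MAX / PAGE_SIZE
 * with PAGE_SHIFT == 12 (4 KiB pages), i.e. presumably the expansion
 * of PAGE_COUNTER_MAX.
 */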

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long page_counter_read(struct page_counter *counter)
{
return atomic_long_read(&counter->usage);
}

void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
bool page_counter_try_charge(struct page_counter *counter,
unsigned long nr_pages,
struct page_counter **fail);
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_counter_set_high(struct page_counter *counter,
unsigned long nr_pages)
{
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_339(void) ; if (!((sizeof(counter->high) == sizeof(char) || sizeof(counter->high) == sizeof(short) || sizeof(counter->high) == sizeof(int) || sizeof(counter->high) == sizeof(long)) || sizeof(counter->high) == sizeof(long long))) __compiletime_assert_339(); } while (0); do { *(volatile typeof(counter->high) *)&(counter->high) = (nr_pages); } while (0); } while (0);
}

int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
int page_counter_memparse(const char *buf, const char *max,
unsigned long *nr_pages);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void page_counter_reset_watermark(struct page_counter *counter)
{
counter->watermark = page_counter_read(counter);
}
# 53 "./include/net/sock.h" 2
# 1 "./include/linux/memcontrol.h" 1
# 18 "./include/linux/memcontrol.h"
# 1 "./include/linux/vmpressure.h" 1
# 11 "./include/linux/vmpressure.h"
# 1 "./include/linux/eventfd.h" 1
# 33 "./include/linux/eventfd.h"
struct eventfd_ctx;
struct file;



void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt);
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool eventfd_signal_allowed(void)
{
return !get_current()->in_eventfd_signal;
}
# 12 "./include/linux/vmpressure.h" 2

struct vmpressure {
unsigned long scanned;
unsigned long reclaimed;

unsigned long tree_scanned;
unsigned long tree_reclaimed;

spinlock_t sr_lock;


struct list_head events;

struct mutex events_lock;

struct work_struct work;
};

struct mem_cgroup;
# 47 "./include/linux/vmpressure.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
unsigned long scanned, unsigned long reclaimed) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg,
int prio) {}
# 19 "./include/linux/memcontrol.h" 2



# 1 "./include/linux/writeback.h" 1
# 11 "./include/linux/writeback.h"
# 1 "./include/linux/flex_proportions.h" 1
# 28 "./include/linux/flex_proportions.h"
struct fprop_global {

struct percpu_counter events;

unsigned int period;

seqcount_t sequence;
};

int fprop_global_init(struct fprop_global *p, gfp_t gfp);
void fprop_global_destroy(struct fprop_global *p);
bool fprop_new_period(struct fprop_global *p, int periods);




struct fprop_local_single {

unsigned long events;

unsigned int period;
raw_spinlock_t lock;
};





int fprop_local_init_single(struct fprop_local_single *pl);
void fprop_local_destroy_single(struct fprop_local_single *pl);
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl);
void fprop_fraction_single(struct fprop_global *p,
struct fprop_local_single *pl, unsigned long *numerator,
unsigned long *denominator);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
unsigned long flags;

do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); if (!({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) trace_hardirqs_off(); } while (0);
__fprop_inc_single(p, pl);
do { if (!({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(flags); } while (0); } while (0);
}
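/*
 * The two irq-flag do/while blocks wrapping __fprop_inc_single()
 * (and __fprop_add_percpu() in fprop_inc_percpu() below) appear to
 * be expansions of local_irq_save(flags) / local_irq_restore(flags),
 * with the trace_hardirqs_off()/on() calls coming from irq tracing
 * being enabled in this configuration.
 */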




struct fprop_local_percpu {

struct percpu_counter events;

unsigned int period;
raw_spinlock_t lock;
};

int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
long nr);
void __fprop_add_percpu_max(struct fprop_global *p,
struct fprop_local_percpu *pl, int max_frac, long nr);
void fprop_fraction_percpu(struct fprop_global *p,
struct fprop_local_percpu *pl, unsigned long *numerator,
unsigned long *denominator);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
{
unsigned long flags;

do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); if (!({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) trace_hardirqs_off(); } while (0);
__fprop_add_percpu(p, pl, 1);
do { if (!({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(flags); } while (0); } while (0);
}
# 12 "./include/linux/writeback.h" 2
# 1 "./include/linux/backing-dev-defs.h" 1
# 17 "./include/linux/backing-dev-defs.h"
struct page;
struct device;
struct dentry;




enum wb_state {
WB_registered,
WB_writeback_running,
WB_has_dirty_io,
WB_start_all,
};

enum wb_congested_state {
WB_async_congested,
WB_sync_congested,
};

enum wb_stat_item {
WB_RECLAIMABLE,
WB_WRITEBACK,
WB_DIRTIED,
WB_WRITTEN,
NR_WB_STAT_ITEMS
};






enum wb_reason {
WB_REASON_BACKGROUND,
WB_REASON_VMSCAN,
WB_REASON_SYNC,
WB_REASON_PERIODIC,
WB_REASON_LAPTOP_TIMER,
WB_REASON_FS_FREE_SPACE,






WB_REASON_FORKER_THREAD,
WB_REASON_FOREIGN_FLUSH,

WB_REASON_MAX,
};

struct wb_completion {
atomic_t cnt;
wait_queue_head_t *waitq;
};
# 110 "./include/linux/backing-dev-defs.h"
struct bdi_writeback {
struct backing_dev_info *bdi;

unsigned long state;
unsigned long last_old_flush;

struct list_head b_dirty;
struct list_head b_io;
struct list_head b_more_io;
struct list_head b_dirty_time;
spinlock_t list_lock;

atomic_t writeback_inodes;
struct percpu_counter stat[NR_WB_STAT_ITEMS];

unsigned long congested;

unsigned long bw_time_stamp;
unsigned long dirtied_stamp;
unsigned long written_stamp;
unsigned long write_bandwidth;
unsigned long avg_write_bandwidth;







unsigned long dirty_ratelimit;
unsigned long balanced_dirty_ratelimit;

struct fprop_local_percpu completions;
int dirty_exceeded;
enum wb_reason start_all_reason;

spinlock_t work_lock;
struct list_head work_list;
struct delayed_work dwork;
struct delayed_work bw_dwork;

unsigned long dirty_sleep;

struct list_head bdi_node;
# 170 "./include/linux/backing-dev-defs.h"
};

struct backing_dev_info {
u64 id;
struct rb_node rb_node;
struct list_head bdi_list;
unsigned long ra_pages;
unsigned long io_pages;

struct kref refcnt;
unsigned int capabilities;
unsigned int min_ratio;
unsigned int max_ratio, max_prop_frac;





atomic_long_t tot_write_bandwidth;

struct bdi_writeback wb;
struct list_head wb_list;





wait_queue_head_t wb_waitq;

struct device *dev;
char dev_name[64];
struct device *owner;

struct timer_list laptop_mode_wb_timer;


struct dentry *debug_dir;

};

struct wb_lock_cookie {
bool locked;
unsigned long flags;
};
# 279 "./include/linux/backing-dev-defs.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool wb_tryget(struct bdi_writeback *wb)
{
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void wb_get(struct bdi_writeback *wb)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void wb_put(struct bdi_writeback *wb)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool wb_dying(struct bdi_writeback *wb)
{
return false;
}
# 13 "./include/linux/writeback.h" 2
# 1 "./include/linux/blk_types.h" 1
# 14 "./include/linux/blk_types.h"
struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;
# 40 "./include/linux/blk_types.h"
struct block_device {
sector_t bd_start_sect;
sector_t bd_nr_sectors;
struct disk_stats *bd_stats;
unsigned long bd_stamp;
bool bd_read_only;
dev_t bd_dev;
int bd_openers;
struct inode * bd_inode;
struct super_block * bd_super;
void * bd_claiming;
struct device bd_device;
void * bd_holder;
int bd_holders;
bool bd_write_holder;
struct kobject *bd_holder_dir;
u8 bd_partno;
spinlock_t bd_size_lock;
struct gendisk * bd_disk;
struct request_queue * bd_queue;


int bd_fsfreeze_count;

struct mutex bd_fsfreeze_mutex;
struct super_block *bd_fsfreeze_sb;

struct partition_meta_info *bd_meta_info;



};
# 90 "./include/linux/blk_types.h"
typedef u8 blk_status_t;
typedef u16 blk_short_t;
# 177 "./include/linux/blk_types.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool blk_path_error(blk_status_t error)
{
switch (error) {
case (( blk_status_t)1):
case (( blk_status_t)3):
case (( blk_status_t)5):
case (( blk_status_t)6):
case (( blk_status_t)7):
case (( blk_status_t)8):
return false;
}


return true;
}
# 211 "./include/linux/blk_types.h"
struct bio_issue {
u64 value;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 __bio_issue_time(u64 time)
{
return time & ((1ULL << ((64 - 1) - 12)) - 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 bio_issue_time(struct bio_issue *issue)
{
return __bio_issue_time(issue->value);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) sector_t bio_issue_size(struct bio_issue *issue)
{
return ((issue->value & (((1ULL << 12) - 1) << ((64 - 1) - 12))) >> ((64 - 1) - 12));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bio_issue_init(struct bio_issue *issue,
sector_t size)
{
size &= (1ULL << 12) - 1;
issue->value = ((issue->value & (~((1ULL << (64 - 1)) - 1))) |
(ktime_get_ns() & ((1ULL << ((64 - 1) - 12)) - 1)) |
((u64)size << ((64 - 1) - 12)));
}

typedef unsigned int blk_qc_t;






struct bio {
struct bio *bi_next;
struct block_device *bi_bdev;
unsigned int bi_opf;



unsigned short bi_flags;
unsigned short bi_ioprio;
blk_status_t bi_status;
atomic_t __bi_remaining;

struct bvec_iter bi_iter;

blk_qc_t bi_cookie;
bio_end_io_t *bi_end_io;
void *bi_private;
# 281 "./include/linux/blk_types.h"
union {



};

unsigned short bi_vcnt;





unsigned short bi_max_vecs;

atomic_t __bi_cnt;

struct bio_vec *bi_io_vec;

struct bio_set *bi_pool;






struct bio_vec bi_inline_vecs[];
};







enum {
BIO_NO_PAGE_REF,
BIO_CLONED,
BIO_BOUNCED,
BIO_WORKINGSET,
BIO_QUIET,
BIO_CHAIN,
BIO_REFFED,
BIO_THROTTLED,

BIO_TRACE_COMPLETION,

BIO_CGROUP_ACCT,
BIO_QOS_THROTTLED,
BIO_QOS_MERGED,
BIO_REMAPPED,
BIO_ZONE_WRITE_LOCKED,
BIO_PERCPU_CACHE,
BIO_FLAG_LAST
};

typedef __u32 blk_mq_req_flags_t;
# 355 "./include/linux/blk_types.h"
enum req_opf {

REQ_OP_READ = 0,

REQ_OP_WRITE = 1,

REQ_OP_FLUSH = 2,

REQ_OP_DISCARD = 3,

REQ_OP_SECURE_ERASE = 5,

REQ_OP_WRITE_ZEROES = 9,

REQ_OP_ZONE_OPEN = 10,

REQ_OP_ZONE_CLOSE = 11,

REQ_OP_ZONE_FINISH = 12,

REQ_OP_ZONE_APPEND = 13,

REQ_OP_ZONE_RESET = 15,

REQ_OP_ZONE_RESET_ALL = 17,


REQ_OP_DRV_IN = 34,
REQ_OP_DRV_OUT = 35,

REQ_OP_LAST,
};

enum req_flag_bits {
__REQ_FAILFAST_DEV =
8,
__REQ_FAILFAST_TRANSPORT,
__REQ_FAILFAST_DRIVER,
__REQ_SYNC,
__REQ_META,
__REQ_PRIO,
__REQ_NOMERGE,
__REQ_IDLE,
__REQ_INTEGRITY,
__REQ_FUA,
__REQ_PREFLUSH,
__REQ_RAHEAD,
__REQ_BACKGROUND,
__REQ_NOWAIT,







__REQ_CGROUP_PUNT,


__REQ_NOUNMAP,

__REQ_POLLED,


__REQ_DRV,
__REQ_SWAP,
__REQ_NR_BITS,
};
# 452 "./include/linux/blk_types.h"
enum stat_group {
STAT_READ,
STAT_WRITE,
STAT_DISCARD,
STAT_FLUSH,

NR_STAT_GROUPS
};





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bio_set_op_attrs(struct bio *bio, unsigned op,
unsigned op_flags)
{
bio->bi_opf = op | op_flags;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool op_is_write(unsigned int op)
{
return (op & 1);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool op_is_flush(unsigned int op)
{
return op & ((1ULL << __REQ_FUA) | (1ULL << __REQ_PREFLUSH));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool op_is_sync(unsigned int op)
{
return (op & ((1 << 8) - 1)) == REQ_OP_READ ||
(op & ((1ULL << __REQ_SYNC) | (1ULL << __REQ_FUA) | (1ULL << __REQ_PREFLUSH)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool op_is_discard(unsigned int op)
{
return (op & ((1 << 8) - 1)) == REQ_OP_DISCARD;
}
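/*
 * "(op & ((1 << 8) - 1))" masks off the request flags, presumably
 * the expansion of "op & REQ_OP_MASK" (REQ_OP_BITS == 8) as used by
 * bio_op()/req_op(), leaving only the enum req_opf operation code.
 */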







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool op_is_zone_mgmt(enum req_opf op)
{
switch (op & ((1 << 8) - 1)) {
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_OPEN:
case REQ_OP_ZONE_CLOSE:
case REQ_OP_ZONE_FINISH:
return true;
default:
return false;
}
}

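/*
 * op_stat_group() maps an operation to its enum stat_group slot; returning
 * op_is_write() directly works because STAT_READ == 0 and STAT_WRITE == 1.
 */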
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int op_stat_group(unsigned int op)
{
if (op_is_discard(op))
return STAT_DISCARD;
return op_is_write(op);
}

struct blk_rq_stat {
u64 mean;
u64 min;
u64 max;
u32 nr_samples;
u64 batch;
};
# 14 "./include/linux/writeback.h" 2

struct bio;

extern __attribute__((section(".data..percpu" ""))) __typeof__(int) dirty_throttle_leaks;
# 35 "./include/linux/writeback.h"
struct backing_dev_info;




enum writeback_sync_modes {
WB_SYNC_NONE,
WB_SYNC_ALL,
};






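/*
 * A control structure which tells the writeback code what to do.  These
 * are always on the stack, and hence need no locking.  They are always
 * initialised in a manner such that unspecified fields are set to zero.
 */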
struct writeback_control {
long nr_to_write;

long pages_skipped;






loff_t range_start;
loff_t range_end;

enum writeback_sync_modes sync_mode;

unsigned for_kupdate:1;
unsigned for_background:1;
unsigned tagged_writepages:1;
unsigned for_reclaim:1;
unsigned range_cyclic:1;
unsigned for_sync:1;
unsigned unpinned_fscache_wb:1;







unsigned no_cgroup_owner:1;

unsigned punt_to_cgroup:1;
# 95 "./include/linux/writeback.h"
};

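/*
 * Translate a writeback_control into the REQ_* flags for the bios it will
 * submit: data-integrity (WB_SYNC_ALL) writeback is marked REQ_SYNC, while
 * kupdate/background writeback is marked REQ_BACKGROUND.
 */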
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int wbc_to_write_flags(struct writeback_control *wbc)
{
int flags = 0;

if (wbc->punt_to_cgroup)
flags = (1ULL << __REQ_CGROUP_PUNT);

if (wbc->sync_mode == WB_SYNC_ALL)
flags |= (1ULL << __REQ_SYNC);
else if (wbc->for_kupdate || wbc->for_background)
flags |= (1ULL << __REQ_BACKGROUND);

return flags;
}
# 126 "./include/linux/writeback.h"
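/*
 * A wb_domain represents a domain that bdi_writeback's belong to and are
 * measured against each other in.  The fprop fields track each wb's
 * proportion of recent writeback completions; dirty_limit is the
 * domain-wide limit that is gradually adapted towards the computed target.
 */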
struct wb_domain {
spinlock_t lock;
# 146 "./include/linux/writeback.h"
struct fprop_global completions;
struct timer_list period_timer;
unsigned long period_time;
# 160 "./include/linux/writeback.h"
unsigned long dirty_limit_tstamp;
unsigned long dirty_limit;
};
# 176 "./include/linux/writeback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void wb_domain_size_changed(struct wb_domain *dom)
{
spin_lock(&dom->lock);
dom->dirty_limit_tstamp = jiffies;
dom->dirty_limit = 0;
spin_unlock(&dom->lock);
}




struct bdi_writeback;
void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
enum wb_reason reason);
void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
void wakeup_flusher_threads(enum wb_reason reason);
void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
void inode_io_list_del(struct inode *inode);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void wait_on_inode(struct inode *inode)
{
do { __might_sleep("include/linux/writeback.h", 202); __cond_resched(); } while (0);
wait_on_bit(&inode->i_state, 3, 0x0002);
}
# 294 "./include/linux/writeback.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inode_attach_wb(struct inode *inode, struct page *page)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inode_detach_wb(struct inode *inode)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
struct inode *inode)

{
spin_unlock(&inode->i_lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
struct inode *inode)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void wbc_detach_inode(struct writeback_control *wbc)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void wbc_account_cgroup_owner(struct writeback_control *wbc,
struct page *page, size_t bytes)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cgroup_writeback_umount(void)
{
}






void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_timer_fn(struct timer_list *t);
bool node_dirty_ok(struct pglist_data *pgdat);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);




extern struct wb_domain global_wb_domain;


extern int dirty_background_ratio;
extern unsigned long dirty_background_bytes;
extern int vm_dirty_ratio;
extern unsigned long vm_dirty_bytes;
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
extern unsigned int dirtytime_expire_interval;
extern int vm_highmem_is_dirtyable;
extern int laptop_mode;

int dirty_background_ratio_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int dirty_background_bytes_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int dirty_ratio_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int dirty_bytes_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int dirtytime_interval_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);

void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);

void wb_update_bandwidth(struct bdi_writeback *wb);
void balance_dirty_pages_ratelimited(struct address_space *mapping);
bool wb_over_bg_thresh(struct bdi_writeback *wb);

typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
void *data);

int generic_writepages(struct address_space *mapping,
struct writeback_control *wbc);
void tag_pages_for_writeback(struct address_space *mapping,
unsigned long start, unsigned long end);
int write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc, writepage_t writepage,
void *data);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
void writeback_set_ratelimit(void);

bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio);
void folio_account_redirty(struct folio *folio);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void account_page_redirty(struct page *page)
{
folio_account_redirty((_Generic((page), const struct page *: (const struct folio *)_compound_head(page), struct page *: (struct folio *)_compound_head(page))));
}
bool folio_redirty_for_writepage(struct writeback_control *, struct folio *);
bool redirty_page_for_writepage(struct writeback_control *, struct page *);

void sb_mark_inode_writeback(struct inode *inode);
void sb_clear_inode_writeback(struct inode *inode);
# 23 "./include/linux/memcontrol.h" 2


struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;


enum memcg_stat_item {
MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
MEMCG_SOCK,
MEMCG_PERCPU_B,
MEMCG_VMALLOC,
MEMCG_KMEM,
MEMCG_NR_STAT,
};

enum memcg_memory_event {
MEMCG_LOW,
MEMCG_HIGH,
MEMCG_MAX,
MEMCG_OOM,
MEMCG_OOM_KILL,
MEMCG_OOM_GROUP_KILL,
MEMCG_SWAP_HIGH,
MEMCG_SWAP_MAX,
MEMCG_SWAP_FAIL,
MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
unsigned int generation;
};
# 1130 "./include/linux/memcontrol.h"
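/*
 * CONFIG_MEMCG is disabled in this kernel configuration, so what follows
 * are the no-op stubs from include/linux/memcontrol.h: memcg lookups
 * return NULL, charging succeeds unconditionally, and the lruvec helpers
 * fall back to the per-node lruvec.
 */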
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct mem_cgroup *folio_memcg(struct folio *folio)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct mem_cgroup *page_memcg(struct page *page)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
({ int __ret_warn_on = !!(!rcu_read_lock_held()); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/memcontrol.h"), "i" (1142), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct mem_cgroup *page_memcg_check(struct page *page)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_memcg_kmem(struct folio *folio)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool PageMemcgKmem(struct page *page)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mem_cgroup_disabled(void)
{
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memcg_memory_event(struct mem_cgroup *memcg,
enum memcg_memory_event event)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memcg_memory_event_mm(struct mm_struct *mm,
enum memcg_memory_event event)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_cgroup_protection(struct mem_cgroup *root,
struct mem_cgroup *memcg,
unsigned long *min,
unsigned long *low)
{
*min = *low = 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_cgroup_calculate_protection(struct mem_cgroup *root,
struct mem_cgroup *memcg)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mem_cgroup_charge(struct folio *folio,
struct mm_struct *mm, gfp_t gfp)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mem_cgroup_swapin_charge_page(struct page *page,
struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_cgroup_uncharge(struct folio *folio)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
struct pglist_data *pgdat)
{
return &pgdat->__lruvec;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct lruvec *folio_lruvec(struct folio *folio)
{
struct pglist_data *pgdat = folio_pgdat(folio);
return &pgdat->__lruvec;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mm_match_cgroup(struct mm_struct *mm,
struct mem_cgroup *memcg)
{
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct lruvec *folio_lruvec_lock(struct folio *folio)
{
struct pglist_data *pgdat = folio_pgdat(folio);

spin_lock(&pgdat->__lruvec.lru_lock);
return &pgdat->__lruvec;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
struct pglist_data *pgdat = folio_pgdat(folio);

spin_lock_irq(&pgdat->__lruvec.lru_lock);
return &pgdat->__lruvec;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
unsigned long *flagsp)
{
struct pglist_data *pgdat = folio_pgdat(folio);

do { do { ({ unsigned long __dummy; typeof(*flagsp) __dummy2; (void)(&__dummy == &__dummy2); 1; }); *flagsp = _raw_spin_lock_irqsave(spinlock_check(&pgdat->__lruvec.lru_lock)); } while (0); } while (0);
return &pgdat->__lruvec;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
struct mem_cgroup *prev,
struct mem_cgroup_reclaim_cookie *reclaim)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_cgroup_iter_break(struct mem_cgroup *root,
struct mem_cgroup *prev)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
int (*fn)(struct task_struct *, void *), void *arg)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
({ int __ret_warn_on = !!(id); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/memcontrol.h"), "i" (1326), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });

return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mem_cgroup_online(struct mem_cgroup *memcg)
{
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
enum lru_list lru, int zone_idx)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void lock_page_memcg(struct page *page)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void unlock_page_memcg(struct page *page)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_memcg_lock(struct folio *folio)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void folio_memcg_unlock(struct folio *folio)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_cgroup_handle_over_high(void)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_cgroup_enter_user_fault(void)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_cgroup_exit_user_fault(void)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool task_in_memcg_oom(struct task_struct *p)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mem_cgroup_oom_synchronize(bool wait)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct mem_cgroup *mem_cgroup_get_oom_group(
struct task_struct *victim, struct mem_cgroup *oom_domain)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __mod_memcg_state(struct mem_cgroup *memcg,
int idx,
int nr)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mod_memcg_state(struct mem_cgroup *memcg,
int idx,
int nr)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mod_memcg_page_state(struct page *page,
int idx, int val)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long lruvec_page_state(struct lruvec *lruvec,
enum node_stat_item idx)
{
return global_node_page_state(idx);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long lruvec_page_state_local(struct lruvec *lruvec,
enum node_stat_item idx)
{
return global_node_page_state(idx);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_cgroup_flush_stats(void)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_cgroup_flush_stats_delayed(void)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __mod_memcg_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx, int val)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
int val)
{
struct page *page = virt_to_head_page(p);

__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
int val)
{
struct page *page = virt_to_head_page(p);

mod_node_page_state(page_pgdat(page), idx, val);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void count_memcg_events(struct mem_cgroup *memcg,
enum vm_event_item idx,
unsigned long count)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __count_memcg_events(struct mem_cgroup *memcg,
enum vm_event_item idx,
unsigned long count)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void count_memcg_page_event(struct page *page,
int idx)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
unsigned long *total_scanned)
{
return 0;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
__mod_lruvec_kmem_state(p, idx, 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
__mod_lruvec_kmem_state(p, idx, -1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
struct mem_cgroup *memcg;

memcg = lruvec_memcg(lruvec);
if (!memcg)
return ((void *)0);
memcg = parent_mem_cgroup(memcg);
if (!memcg)
return ((void *)0);
return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void unlock_page_lruvec(struct lruvec *lruvec)
{
spin_unlock(&lruvec->lru_lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
spin_unlock_irq(&lruvec->lru_lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
unsigned long flags)
{
spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool folio_matches_lruvec(struct folio *folio,
struct lruvec *lruvec)
{
return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
lruvec_memcg(lruvec) == folio_memcg(folio);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
struct lruvec *locked_lruvec)
{
if (locked_lruvec) {
if (folio_matches_lruvec(folio, locked_lruvec))
return locked_lruvec;

unlock_page_lruvec_irq(locked_lruvec);
}

return folio_lruvec_lock_irq(folio);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
struct lruvec *locked_lruvec, unsigned long *flags)
{
if (locked_lruvec) {
if (folio_matches_lruvec(folio, locked_lruvec))
return locked_lruvec;

unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
}

return folio_lruvec_lock_irqsave(folio, flags);
}
# 1618 "./include/linux/memcontrol.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_cgroup_wb_stats(struct bdi_writeback *wb,
unsigned long *pfilepages,
unsigned long *pheadroom,
unsigned long *pdirty,
unsigned long *pwriteback)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_cgroup_track_foreign_dirty(struct folio *folio,
struct bdi_writeback *wb)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}



struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
# 1668 "./include/linux/memcontrol.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mem_cgroup_sk_free(struct sock *sk) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_shrinker_bit(struct mem_cgroup *memcg,
int nid, int shrinker_id)
{
}
# 1724 "./include/linux/memcontrol.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mem_cgroup_kmem_disabled(void)
{
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
int order)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
int order)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool memcg_kmem_enabled(void)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int memcg_kmem_id(struct mem_cgroup *memcg)
{
return -1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
return ((void *)0);
}
# 54 "./include/net/sock.h" 2
# 1 "./include/linux/static_key.h" 1
# 55 "./include/net/sock.h" 2




# 1 "./include/linux/rculist_nulls.h" 1
# 33 "./include/linux/rculist_nulls.h"
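/*
 * RCU-protected "nulls" hlists terminate in a marker value with the low
 * bit set instead of NULL (tested by is_a_nulls()), allowing lockless
 * readers to detect that an entry was moved to another chain under
 * SLAB_TYPESAFE_BY_RCU.  The do/while blocks below are the expanded
 * WRITE_ONCE()/rcu_assign_pointer() macros; the "fence rw,w" asm is the
 * RISC-V store-release barrier they use.
 */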
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
{
if (!hlist_nulls_unhashed(n)) {
__hlist_nulls_del(n);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_340(void) ; if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_340(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (((void *)0)); } while (0); } while (0);
}
}
# 74 "./include/linux/rculist_nulls.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
{
__hlist_nulls_del(n);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_341(void) ; if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_341(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (((void *) 0x122 + 0)); } while (0); } while (0);
}
# 99 "./include/linux/rculist_nulls.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
struct hlist_nulls_head *h)
{
struct hlist_nulls_node *first = h->first;

n->next = first;
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_342(void) ; if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_342(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (&h->first); } while (0); } while (0);
do { uintptr_t _r_a_p__v = (uintptr_t)(n); ; if (__builtin_constant_p(n) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_343(void) ; if (!((sizeof(((*((struct hlist_nulls_node **)&(h)->first)))) == sizeof(char) || sizeof(((*((struct hlist_nulls_node **)&(h)->first)))) == sizeof(short) || sizeof(((*((struct hlist_nulls_node **)&(h)->first)))) == sizeof(int) || sizeof(((*((struct hlist_nulls_node **)&(h)->first)))) == sizeof(long)) || sizeof(((*((struct hlist_nulls_node **)&(h)->first)))) == sizeof(long long))) __compiletime_assert_343(); } while (0); do { *(volatile typeof(((*((struct hlist_nulls_node **)&(h)->first)))) *)&(((*((struct hlist_nulls_node **)&(h)->first)))) = ((typeof((*((struct hlist_nulls_node **)&(h)->first))))(_r_a_p__v)); } while (0); } while (0); else do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_344(void) ; if (!((sizeof(*&(*((struct hlist_nulls_node **)&(h)->first))) == sizeof(char) || sizeof(*&(*((struct hlist_nulls_node **)&(h)->first))) == sizeof(short) || sizeof(*&(*((struct hlist_nulls_node **)&(h)->first))) == sizeof(int) || sizeof(*&(*((struct hlist_nulls_node **)&(h)->first))) == sizeof(long)))) __compiletime_assert_344(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_345(void) ; if (!((sizeof(*&(*((struct hlist_nulls_node **)&(h)->first))) == sizeof(char) || sizeof(*&(*((struct hlist_nulls_node **)&(h)->first))) == sizeof(short) || sizeof(*&(*((struct hlist_nulls_node **)&(h)->first))) == sizeof(int) || sizeof(*&(*((struct hlist_nulls_node **)&(h)->first))) == sizeof(long)) || sizeof(*&(*((struct hlist_nulls_node **)&(h)->first))) == sizeof(long long))) __compiletime_assert_345(); } while (0); do { *(volatile typeof(*&(*((struct hlist_nulls_node **)&(h)->first))) *)&(*&(*((struct hlist_nulls_node **)&(h)->first))) = ((typeof(*((typeof((*((struct hlist_nulls_node **)&(h)->first))))_r_a_p__v)) *)((typeof((*((struct hlist_nulls_node **)&(h)->first))))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); } while (0);
if (!is_a_nulls(first))
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_346(void) ; if (!((sizeof(first->pprev) == sizeof(char) || sizeof(first->pprev) == sizeof(short) || sizeof(first->pprev) == sizeof(int) || sizeof(first->pprev) == sizeof(long)) || sizeof(first->pprev) == sizeof(long long))) __compiletime_assert_346(); } while (0); do { *(volatile typeof(first->pprev) *)&(first->pprev) = (&n->next); } while (0); } while (0);
}
# 130 "./include/linux/rculist_nulls.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
struct hlist_nulls_head *h)
{
struct hlist_nulls_node *i, *last = ((void *)0);


for (i = h->first; !is_a_nulls(i); i = i->next)
last = i;

if (last) {
n->next = last->next;
n->pprev = &last->next;
do { uintptr_t _r_a_p__v = (uintptr_t)(n); ; if (__builtin_constant_p(n) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_347(void) ; if (!((sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(char) || sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(short) || sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(int) || sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(long)) || sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(long long))) __compiletime_assert_347(); } while (0); do { *(volatile typeof(((*((struct hlist_node **)(&(last)->next))))) *)&(((*((struct hlist_node **)(&(last)->next))))) = ((typeof((*((struct hlist_node **)(&(last)->next)))))(_r_a_p__v)); } while (0); } while (0); else do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_348(void) ; if (!((sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(long)))) __compiletime_assert_348(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_349(void) ; if (!((sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(long)) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(long long))) __compiletime_assert_349(); } while (0); do { *(volatile typeof(*&(*((struct hlist_node **)(&(last)->next)))) *)&(*&(*((struct hlist_node **)(&(last)->next)))) = ((typeof(*((typeof((*((struct hlist_node **)(&(last)->next)))))_r_a_p__v)) *)((typeof((*((struct hlist_node **)(&(last)->next)))))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); } while (0);
} else {
hlist_nulls_add_head_rcu(n, h);
}
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void hlist_nulls_add_fake(struct hlist_nulls_node *n)
{
n->pprev = &n->next;
n->next = (struct hlist_nulls_node *)(1UL | (((long)((void *)0)) << 1));
}
# 60 "./include/net/sock.h" 2
# 1 "./include/linux/poll.h" 1
# 12 "./include/linux/poll.h"
# 1 "./include/uapi/linux/poll.h" 1
# 1 "./arch/riscv/include/generated/uapi/asm/poll.h" 1
# 1 "./include/uapi/asm-generic/poll.h" 1
# 36 "./include/uapi/asm-generic/poll.h"
struct pollfd {
int fd;
short events;
short revents;
};
# 2 "./arch/riscv/include/generated/uapi/asm/poll.h" 2
# 2 "./include/uapi/linux/poll.h" 2
# 13 "./include/linux/poll.h" 2
# 1 "./include/uapi/linux/eventpoll.h" 1
# 77 "./include/uapi/linux/eventpoll.h"
struct epoll_event {
__poll_t events;
__u64 data;
} ;
# 89 "./include/uapi/linux/eventpoll.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ep_take_care_of_epollwakeup(struct epoll_event *epev)
{
epev->events &= ~(( __poll_t)(1U << 29));
}
# 14 "./include/linux/poll.h" 2
# 30 "./include/linux/poll.h"
struct poll_table_struct;




typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);





typedef struct poll_table_struct {
poll_queue_proc _qproc;
__poll_t _key;
} poll_table;

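/*
 * poll_wait() registers the caller on @wait_address through the queueing
 * callback in the poll_table; a NULL table or NULL _qproc means the caller
 * only wants the current ready mask (see poll_does_not_wait() below).
 */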
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
{
if (p && p->_qproc && wait_address)
p->_qproc(filp, wait_address, p);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool poll_does_not_wait(const poll_table *p)
{
return p == ((void *)0) || p->_qproc == ((void *)0);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __poll_t poll_requested_events(const poll_table *p)
{
return p ? p->_key : ~(__poll_t)0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
pt->_qproc = qproc;
pt->_key = ~(__poll_t)0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool file_can_poll(struct file *file)
{
return file->f_op->poll;
}

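/*
 * ->poll() is optional; files without one are reported as always readable
 * and writable via the expanded DEFAULT_POLLMASK
 * (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM) below.
 */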
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
{
if (__builtin_expect(!!(!file->f_op->poll), 0))
return (( __poll_t)0x00000001 | ( __poll_t)0x00000004 | ( __poll_t)0x00000040 | ( __poll_t)0x00000100);
return file->f_op->poll(file, pt);
}

struct poll_table_entry {
struct file *filp;
__poll_t key;
wait_queue_entry_t wait;
wait_queue_head_t *wait_address;
};




struct poll_wqueues {
poll_table pt;
struct poll_table_page *table;
struct task_struct *polling_task;
int triggered;
int error;
int inline_index;
struct poll_table_entry inline_entries[((768 - 256) / sizeof(struct poll_table_entry))];
};

extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
extern u64 select_estimate_accuracy(struct timespec64 *tv);



extern int core_sys_select(int n, fd_set *inp, fd_set *outp,
fd_set *exp, struct timespec64 *end_time);

extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec,
long nsec);




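/*
 * mangle_poll()/demangle_poll() convert between the architecture-
 * independent __poll_t (EPOLL*) bits and the 16-bit poll(2) event layout.
 * Each ternary chain is an expansion of the __MAP() macro from
 * include/linux/poll.h; where the two encodings of a bit already agree the
 * compiler folds the term down to a plain mask.
 */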
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u16 mangle_poll(__poll_t val)
{
__u16 v = ( __u16)val;

return (( __u16)( __poll_t)0x00000001 < 0x0001 ? (v & ( __u16)( __poll_t)0x00000001) * (0x0001/( __u16)( __poll_t)0x00000001) : (v & ( __u16)( __poll_t)0x00000001) / (( __u16)( __poll_t)0x00000001/0x0001)) | (( __u16)( __poll_t)0x00000004 < 0x0004 ? (v & ( __u16)( __poll_t)0x00000004) * (0x0004/( __u16)( __poll_t)0x00000004) : (v & ( __u16)( __poll_t)0x00000004) / (( __u16)( __poll_t)0x00000004/0x0004)) | (( __u16)( __poll_t)0x00000002 < 0x0002 ? (v & ( __u16)( __poll_t)0x00000002) * (0x0002/( __u16)( __poll_t)0x00000002) : (v & ( __u16)( __poll_t)0x00000002) / (( __u16)( __poll_t)0x00000002/0x0002)) | (( __u16)( __poll_t)0x00000008 < 0x0008 ? (v & ( __u16)( __poll_t)0x00000008) * (0x0008/( __u16)( __poll_t)0x00000008) : (v & ( __u16)( __poll_t)0x00000008) / (( __u16)( __poll_t)0x00000008/0x0008)) | (( __u16)( __poll_t)0x00000020 < 0x0020 ? (v & ( __u16)( __poll_t)0x00000020) * (0x0020/( __u16)( __poll_t)0x00000020) : (v & ( __u16)( __poll_t)0x00000020) / (( __u16)( __poll_t)0x00000020/0x0020)) |
(( __u16)( __poll_t)0x00000040 < 0x0040 ? (v & ( __u16)( __poll_t)0x00000040) * (0x0040/( __u16)( __poll_t)0x00000040) : (v & ( __u16)( __poll_t)0x00000040) / (( __u16)( __poll_t)0x00000040/0x0040)) | (( __u16)( __poll_t)0x00000080 < 0x0080 ? (v & ( __u16)( __poll_t)0x00000080) * (0x0080/( __u16)( __poll_t)0x00000080) : (v & ( __u16)( __poll_t)0x00000080) / (( __u16)( __poll_t)0x00000080/0x0080)) | (( __u16)( __poll_t)0x00000100 < 0x0100 ? (v & ( __u16)( __poll_t)0x00000100) * (0x0100/( __u16)( __poll_t)0x00000100) : (v & ( __u16)( __poll_t)0x00000100) / (( __u16)( __poll_t)0x00000100/0x0100)) | (( __u16)( __poll_t)0x00000200 < 0x0200 ? (v & ( __u16)( __poll_t)0x00000200) * (0x0200/( __u16)( __poll_t)0x00000200) : (v & ( __u16)( __poll_t)0x00000200) / (( __u16)( __poll_t)0x00000200/0x0200)) |
(( __u16)( __poll_t)0x00000010 < 0x0010 ? (v & ( __u16)( __poll_t)0x00000010) * (0x0010/( __u16)( __poll_t)0x00000010) : (v & ( __u16)( __poll_t)0x00000010) / (( __u16)( __poll_t)0x00000010/0x0010)) | (( __u16)( __poll_t)0x00002000 < 0x2000 ? (v & ( __u16)( __poll_t)0x00002000) * (0x2000/( __u16)( __poll_t)0x00002000) : (v & ( __u16)( __poll_t)0x00002000) / (( __u16)( __poll_t)0x00002000/0x2000)) | (( __u16)( __poll_t)0x00000400 < 0x0400 ? (v & ( __u16)( __poll_t)0x00000400) * (0x0400/( __u16)( __poll_t)0x00000400) : (v & ( __u16)( __poll_t)0x00000400) / (( __u16)( __poll_t)0x00000400/0x0400));

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __poll_t demangle_poll(u16 val)
{

return ( __poll_t)(0x0001 < ( __u16)( __poll_t)0x00000001 ? (val & 0x0001) * (( __u16)( __poll_t)0x00000001/0x0001) : (val & 0x0001) / (0x0001/( __u16)( __poll_t)0x00000001)) | ( __poll_t)(0x0004 < ( __u16)( __poll_t)0x00000004 ? (val & 0x0004) * (( __u16)( __poll_t)0x00000004/0x0004) : (val & 0x0004) / (0x0004/( __u16)( __poll_t)0x00000004)) | ( __poll_t)(0x0002 < ( __u16)( __poll_t)0x00000002 ? (val & 0x0002) * (( __u16)( __poll_t)0x00000002/0x0002) : (val & 0x0002) / (0x0002/( __u16)( __poll_t)0x00000002)) | ( __poll_t)(0x0008 < ( __u16)( __poll_t)0x00000008 ? (val & 0x0008) * (( __u16)( __poll_t)0x00000008/0x0008) : (val & 0x0008) / (0x0008/( __u16)( __poll_t)0x00000008)) | ( __poll_t)(0x0020 < ( __u16)( __poll_t)0x00000020 ? (val & 0x0020) * (( __u16)( __poll_t)0x00000020/0x0020) : (val & 0x0020) / (0x0020/( __u16)( __poll_t)0x00000020)) |
( __poll_t)(0x0040 < ( __u16)( __poll_t)0x00000040 ? (val & 0x0040) * (( __u16)( __poll_t)0x00000040/0x0040) : (val & 0x0040) / (0x0040/( __u16)( __poll_t)0x00000040)) | ( __poll_t)(0x0080 < ( __u16)( __poll_t)0x00000080 ? (val & 0x0080) * (( __u16)( __poll_t)0x00000080/0x0080) : (val & 0x0080) / (0x0080/( __u16)( __poll_t)0x00000080)) | ( __poll_t)(0x0100 < ( __u16)( __poll_t)0x00000100 ? (val & 0x0100) * (( __u16)( __poll_t)0x00000100/0x0100) : (val & 0x0100) / (0x0100/( __u16)( __poll_t)0x00000100)) | ( __poll_t)(0x0200 < ( __u16)( __poll_t)0x00000200 ? (val & 0x0200) * (( __u16)( __poll_t)0x00000200/0x0200) : (val & 0x0200) / (0x0200/( __u16)( __poll_t)0x00000200)) |
( __poll_t)(0x0010 < ( __u16)( __poll_t)0x00000010 ? (val & 0x0010) * (( __u16)( __poll_t)0x00000010/0x0010) : (val & 0x0010) / (0x0010/( __u16)( __poll_t)0x00000010)) | ( __poll_t)(0x2000 < ( __u16)( __poll_t)0x00002000 ? (val & 0x2000) * (( __u16)( __poll_t)0x00002000/0x2000) : (val & 0x2000) / (0x2000/( __u16)( __poll_t)0x00002000)) | ( __poll_t)(0x0400 < ( __u16)( __poll_t)0x00000400 ? (val & 0x0400) * (( __u16)( __poll_t)0x00000400/0x0400) : (val & 0x0400) / (0x0400/( __u16)( __poll_t)0x00000400));

}
# 61 "./include/net/sock.h" 2

# 1 "./include/linux/indirect_call_wrapper.h" 1
# 63 "./include/net/sock.h" 2



# 1 "./include/net/dst.h" 1
# 14 "./include/net/dst.h"
# 1 "./include/linux/rtnetlink.h" 1
# 10 "./include/linux/rtnetlink.h"
# 1 "./include/uapi/linux/rtnetlink.h" 1







# 1 "./include/uapi/linux/if_addr.h" 1







struct ifaddrmsg {
__u8 ifa_family;
__u8 ifa_prefixlen;
__u8 ifa_flags;
__u8 ifa_scope;
__u32 ifa_index;
};
# 26 "./include/uapi/linux/if_addr.h"
enum {
IFA_UNSPEC,
IFA_ADDRESS,
IFA_LOCAL,
IFA_LABEL,
IFA_BROADCAST,
IFA_ANYCAST,
IFA_CACHEINFO,
IFA_MULTICAST,
IFA_FLAGS,
IFA_RT_PRIORITY,
IFA_TARGET_NETNSID,
IFA_PROTO,
__IFA_MAX,
};
# 60 "./include/uapi/linux/if_addr.h"
struct ifa_cacheinfo {
__u32 ifa_prefered;
__u32 ifa_valid;
__u32 cstamp;
__u32 tstamp;
};
# 9 "./include/uapi/linux/rtnetlink.h" 2
# 24 "./include/uapi/linux/rtnetlink.h"
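/*
 * rtnetlink message types are allocated in NEW/DEL/GET/SET groups of four
 * starting at RTM_BASE (16), which is why the explicit initialisers below
 * advance in steps of four.
 */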
enum {
RTM_BASE = 16,


RTM_NEWLINK = 16,

RTM_DELLINK,

RTM_GETLINK,

RTM_SETLINK,


RTM_NEWADDR = 20,

RTM_DELADDR,

RTM_GETADDR,


RTM_NEWROUTE = 24,

RTM_DELROUTE,

RTM_GETROUTE,


RTM_NEWNEIGH = 28,

RTM_DELNEIGH,

RTM_GETNEIGH,


RTM_NEWRULE = 32,

RTM_DELRULE,

RTM_GETRULE,


RTM_NEWQDISC = 36,

RTM_DELQDISC,

RTM_GETQDISC,


RTM_NEWTCLASS = 40,

RTM_DELTCLASS,

RTM_GETTCLASS,


RTM_NEWTFILTER = 44,

RTM_DELTFILTER,

RTM_GETTFILTER,


RTM_NEWACTION = 48,

RTM_DELACTION,

RTM_GETACTION,


RTM_NEWPREFIX = 52,


RTM_GETMULTICAST = 58,


RTM_GETANYCAST = 62,


RTM_NEWNEIGHTBL = 64,

RTM_GETNEIGHTBL = 66,

RTM_SETNEIGHTBL,


RTM_NEWNDUSEROPT = 68,


RTM_NEWADDRLABEL = 72,

RTM_DELADDRLABEL,

RTM_GETADDRLABEL,


RTM_GETDCB = 78,

RTM_SETDCB,


RTM_NEWNETCONF = 80,

RTM_DELNETCONF,

RTM_GETNETCONF = 82,


RTM_NEWMDB = 84,

RTM_DELMDB = 85,

RTM_GETMDB = 86,


RTM_NEWNSID = 88,

RTM_DELNSID = 89,

RTM_GETNSID = 90,


RTM_NEWSTATS = 92,

RTM_GETSTATS = 94,

RTM_SETSTATS,


RTM_NEWCACHEREPORT = 96,


RTM_NEWCHAIN = 100,

RTM_DELCHAIN,

RTM_GETCHAIN,


RTM_NEWNEXTHOP = 104,

RTM_DELNEXTHOP,

RTM_GETNEXTHOP,


RTM_NEWLINKPROP = 108,

RTM_DELLINKPROP,

RTM_GETLINKPROP,


RTM_NEWVLAN = 112,

RTM_DELVLAN,

RTM_GETVLAN,


RTM_NEWNEXTHOPBUCKET = 116,

RTM_DELNEXTHOPBUCKET,

RTM_GETNEXTHOPBUCKET,


RTM_NEWTUNNEL = 120,

RTM_DELTUNNEL,

RTM_GETTUNNEL,


__RTM_MAX,

};
# 211 "./include/uapi/linux/rtnetlink.h"
struct rtattr {
unsigned short rta_len;
unsigned short rta_type;
};
# 237 "./include/uapi/linux/rtnetlink.h"
struct rtmsg {
unsigned char rtm_family;
unsigned char rtm_dst_len;
unsigned char rtm_src_len;
unsigned char rtm_tos;

unsigned char rtm_table;
unsigned char rtm_protocol;
unsigned char rtm_scope;
unsigned char rtm_type;

unsigned rtm_flags;
};



enum {
RTN_UNSPEC,
RTN_UNICAST,
RTN_LOCAL,
RTN_BROADCAST,

RTN_ANYCAST,

RTN_MULTICAST,
RTN_BLACKHOLE,
RTN_UNREACHABLE,
RTN_PROHIBIT,
RTN_THROW,
RTN_NAT,
RTN_XRESOLVE,
__RTN_MAX
};
# 320 "./include/uapi/linux/rtnetlink.h"
enum rt_scope_t {
RT_SCOPE_UNIVERSE=0,

RT_SCOPE_SITE=200,
RT_SCOPE_LINK=253,
RT_SCOPE_HOST=254,
RT_SCOPE_NOWHERE=255
};
# 347 "./include/uapi/linux/rtnetlink.h"
enum rt_class_t {
RT_TABLE_UNSPEC=0,

RT_TABLE_COMPAT=252,
RT_TABLE_DEFAULT=253,
RT_TABLE_MAIN=254,
RT_TABLE_LOCAL=255,
RT_TABLE_MAX=0xFFFFFFFF
};




enum rtattr_type_t {
RTA_UNSPEC,
RTA_DST,
RTA_SRC,
RTA_IIF,
RTA_OIF,
RTA_GATEWAY,
RTA_PRIORITY,
RTA_PREFSRC,
RTA_METRICS,
RTA_MULTIPATH,
RTA_PROTOINFO,
RTA_FLOW,
RTA_CACHEINFO,
RTA_SESSION,
RTA_MP_ALGO,
RTA_TABLE,
RTA_MARK,
RTA_MFC_STATS,
RTA_VIA,
RTA_NEWDST,
RTA_PREF,
RTA_ENCAP_TYPE,
RTA_ENCAP,
RTA_EXPIRES,
RTA_PAD,
RTA_UID,
RTA_TTL_PROPAGATE,
RTA_IP_PROTO,
RTA_SPORT,
RTA_DPORT,
RTA_NH_ID,
__RTA_MAX
};
# 409 "./include/uapi/linux/rtnetlink.h"
struct rtnexthop {
unsigned short rtnh_len;
unsigned char rtnh_flags;
unsigned char rtnh_hops;
int rtnh_ifindex;
};
# 441 "./include/uapi/linux/rtnetlink.h"
struct rtvia {
__kernel_sa_family_t rtvia_family;
__u8 rtvia_addr[0];
};



struct rta_cacheinfo {
__u32 rta_clntref;
__u32 rta_lastuse;
__s32 rta_expires;
__u32 rta_error;
__u32 rta_used;


__u32 rta_id;
__u32 rta_ts;
__u32 rta_tsage;
};



enum {
RTAX_UNSPEC,

RTAX_LOCK,

RTAX_MTU,

RTAX_WINDOW,

RTAX_RTT,

RTAX_RTTVAR,

RTAX_SSTHRESH,

RTAX_CWND,

RTAX_ADVMSS,

RTAX_REORDERING,

RTAX_HOPLIMIT,

RTAX_INITCWND,

RTAX_FEATURES,

RTAX_RTO_MIN,

RTAX_INITRWND,

RTAX_QUICKACK,

RTAX_CC_ALGO,

RTAX_FASTOPEN_NO_COOKIE,

__RTAX_MAX
};
# 513 "./include/uapi/linux/rtnetlink.h"
struct rta_session {
__u8 proto;
__u8 pad1;
__u16 pad2;

union {
struct {
__u16 sport;
__u16 dport;
} ports;

struct {
__u8 type;
__u8 code;
__u16 ident;
} icmpt;

__u32 spi;
} u;
};

struct rta_mfc_stats {
__u64 mfcs_packets;
__u64 mfcs_bytes;
__u64 mfcs_wrong_if;
};





struct rtgenmsg {
unsigned char rtgen_family;
};
# 557 "./include/uapi/linux/rtnetlink.h"
struct ifinfomsg {
unsigned char ifi_family;
unsigned char __ifi_pad;
unsigned short ifi_type;
int ifi_index;
unsigned ifi_flags;
unsigned ifi_change;
};





struct prefixmsg {
unsigned char prefix_family;
unsigned char prefix_pad1;
unsigned short prefix_pad2;
int prefix_ifindex;
unsigned char prefix_type;
unsigned char prefix_len;
unsigned char prefix_flags;
unsigned char prefix_pad3;
};

enum
{
PREFIX_UNSPEC,
PREFIX_ADDRESS,
PREFIX_CACHEINFO,
__PREFIX_MAX
};



struct prefix_cacheinfo {
__u32 preferred_time;
__u32 valid_time;
};






struct tcmsg {
unsigned char tcm_family;
unsigned char tcm__pad1;
unsigned short tcm__pad2;
int tcm_ifindex;
__u32 tcm_handle;
__u32 tcm_parent;




__u32 tcm_info;
};







enum {
TCA_UNSPEC,
TCA_KIND,
TCA_OPTIONS,
TCA_STATS,
TCA_XSTATS,
TCA_RATE,
TCA_FCNT,
TCA_STATS2,
TCA_STAB,
TCA_PAD,
TCA_DUMP_INVISIBLE,
TCA_CHAIN,
TCA_HW_OFFLOAD,
TCA_INGRESS_BLOCK,
TCA_EGRESS_BLOCK,
TCA_DUMP_FLAGS,
__TCA_MAX
};
# 655 "./include/uapi/linux/rtnetlink.h"
struct nduseroptmsg {
unsigned char nduseropt_family;
unsigned char nduseropt_pad1;
unsigned short nduseropt_opts_len;
int nduseropt_ifindex;
__u8 nduseropt_icmp_type;
__u8 nduseropt_icmp_code;
unsigned short nduseropt_pad2;
unsigned int nduseropt_pad3;

};

enum {
NDUSEROPT_UNSPEC,
NDUSEROPT_SRCADDR,
__NDUSEROPT_MAX
};
# 699 "./include/uapi/linux/rtnetlink.h"
enum rtnetlink_groups {
RTNLGRP_NONE,

RTNLGRP_LINK,

RTNLGRP_NOTIFY,

RTNLGRP_NEIGH,

RTNLGRP_TC,

RTNLGRP_IPV4_IFADDR,

RTNLGRP_IPV4_MROUTE,

RTNLGRP_IPV4_ROUTE,

RTNLGRP_IPV4_RULE,

RTNLGRP_IPV6_IFADDR,

RTNLGRP_IPV6_MROUTE,

RTNLGRP_IPV6_ROUTE,

RTNLGRP_IPV6_IFINFO,

RTNLGRP_DECnet_IFADDR,

RTNLGRP_NOP2,
RTNLGRP_DECnet_ROUTE,

RTNLGRP_DECnet_RULE,

RTNLGRP_NOP4,
RTNLGRP_IPV6_PREFIX,

RTNLGRP_IPV6_RULE,

RTNLGRP_ND_USEROPT,

RTNLGRP_PHONET_IFADDR,

RTNLGRP_PHONET_ROUTE,

RTNLGRP_DCB,

RTNLGRP_IPV4_NETCONF,

RTNLGRP_IPV6_NETCONF,

RTNLGRP_MDB,

RTNLGRP_MPLS_ROUTE,

RTNLGRP_NSID,

RTNLGRP_MPLS_NETCONF,

RTNLGRP_IPV4_MROUTE_R,

RTNLGRP_IPV6_MROUTE_R,

RTNLGRP_NEXTHOP,

RTNLGRP_BRVLAN,

RTNLGRP_MCTP_IFADDR,

RTNLGRP_TUNNEL,

RTNLGRP_STATS,

__RTNLGRP_MAX
};



struct tcamsg {
unsigned char tca_family;
unsigned char tca__pad1;
unsigned short tca__pad2;
};

enum {
TCA_ROOT_UNSPEC,
TCA_ROOT_TAB,


TCA_ROOT_FLAGS,
TCA_ROOT_COUNT,
TCA_ROOT_TIME_DELTA,
__TCA_ROOT_MAX,

};
# 11 "./include/linux/rtnetlink.h" 2

extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid);
extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
u32 group, struct nlmsghdr *nlh, gfp_t flags);
extern void rtnl_set_sk_err(struct net *net, u32 group, int error);
extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
u32 id, long expires, u32 error);

void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
gfp_t flags, int *new_nsid, int new_ifindex);
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
unsigned change, u32 event,
gfp_t flags, int *new_nsid,
int new_ifindex);
void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
gfp_t flags);



extern void rtnl_lock(void);
extern void rtnl_unlock(void);
extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
extern int rtnl_lock_killable(void);
extern bool refcount_dec_and_rtnl_lock(refcount_t *r);

extern wait_queue_head_t netdev_unregistering_wq;
extern struct rw_semaphore pernet_ops_rwsem;
extern struct rw_semaphore net_rwsem;




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool lockdep_rtnl_is_held(void)
{
return true;
}
# 83 "./include/linux/rtnetlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct netdev_queue *dev_ingress_queue(struct net_device *dev)
{
return ({ do { } while (0 && (!((lockdep_rtnl_is_held())))); ; ((typeof(*(dev->ingress_queue)) *)((dev->ingress_queue))); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct netdev_queue *dev_ingress_queue_rcu(struct net_device *dev)
{
return ({ typeof(*(dev->ingress_queue)) *__UNIQUE_ID_rcu350 = (typeof(*(dev->ingress_queue)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_351(void) ; if (!((sizeof((dev->ingress_queue)) == sizeof(char) || sizeof((dev->ingress_queue)) == sizeof(short) || sizeof((dev->ingress_queue)) == sizeof(int) || sizeof((dev->ingress_queue)) == sizeof(long)) || sizeof((dev->ingress_queue)) == sizeof(long long))) __compiletime_assert_351(); } while (0); (*(const volatile typeof( _Generic(((dev->ingress_queue)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((dev->ingress_queue)))) *)&((dev->ingress_queue))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(dev->ingress_queue)) *)(__UNIQUE_ID_rcu350)); });
}

struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
# 105 "./include/linux/rtnetlink.h"
void rtnetlink_init(void);
void __rtnl_unlock(void);
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail);





extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev,
int *idx);
extern int ndo_dflt_fdb_add(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
u16 vid,
u16 flags);
extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
u16 vid);

extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u16 mode,
u32 flags, u32 mask, int nlflags,
u32 filter_mask,
int (*vlan_fill)(struct sk_buff *skb,
struct net_device *dev,
u32 filter_mask));

extern void rtnl_offload_xstats_notify(struct net_device *dev);
# 15 "./include/net/dst.h" 2




# 1 "./include/net/neighbour.h" 1
# 31 "./include/net/neighbour.h"
# 1 "./include/net/rtnetlink.h" 1





# 1 "./include/net/netlink.h" 1
# 165 "./include/net/netlink.h"
enum {
NLA_UNSPEC,
NLA_U8,
NLA_U16,
NLA_U32,
NLA_U64,
NLA_STRING,
NLA_FLAG,
NLA_MSECS,
NLA_NESTED,
NLA_NESTED_ARRAY,
NLA_NUL_STRING,
NLA_BINARY,
NLA_S8,
NLA_S16,
NLA_S32,
NLA_S64,
NLA_BITFIELD32,
NLA_REJECT,
__NLA_TYPE_MAX,
};



struct netlink_range_validation {
u64 min, max;
};

struct netlink_range_validation_signed {
s64 min, max;
};

enum nla_policy_validation {
NLA_VALIDATE_NONE,
NLA_VALIDATE_RANGE,
NLA_VALIDATE_RANGE_WARN_TOO_LONG,
NLA_VALIDATE_MIN,
NLA_VALIDATE_MAX,
NLA_VALIDATE_MASK,
NLA_VALIDATE_RANGE_PTR,
NLA_VALIDATE_FUNCTION,
};
# 315 "./include/net/netlink.h"
struct nla_policy {
u8 type;
u8 validation_type;
u16 len;
union {
const u32 bitfield32_valid;
const u32 mask;
const char *reject_message;
const struct nla_policy *nested_policy;
struct netlink_range_validation *range;
struct netlink_range_validation_signed *range_signed;
struct {
s16 min, max;
};
int (*validate)(const struct nlattr *attr,
struct netlink_ext_ack *extack);
# 349 "./include/net/netlink.h"
u16 strict_start_type;
};
};
# 453 "./include/net/netlink.h"
struct nl_info {
struct nlmsghdr *nlh;
struct net *nl_net;
u32 portid;
u8 skip_notify:1,
skip_notify_kernel:1;
};
# 478 "./include/net/netlink.h"
enum netlink_validation {
NL_VALIDATE_LIBERAL = 0,
NL_VALIDATE_TRAILING = ((((1UL))) << (0)),
NL_VALIDATE_MAXTYPE = ((((1UL))) << (1)),
NL_VALIDATE_UNSPEC = ((((1UL))) << (2)),
NL_VALIDATE_STRICT_ATTRS = ((((1UL))) << (3)),
NL_VALIDATE_NESTED = ((((1UL))) << (4)),
};
# 495 "./include/net/netlink.h"
int netlink_rcv_skb(struct sk_buff *skb,
int (*cb)(struct sk_buff *, struct nlmsghdr *,
struct netlink_ext_ack *));
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
unsigned int group, int report, gfp_t flags);

int __nla_validate(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy, unsigned int validate,
struct netlink_ext_ack *extack);
int __nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
int len, const struct nla_policy *policy, unsigned int validate,
struct netlink_ext_ack *extack);
int nla_policy_len(const struct nla_policy *, int);
struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype);
ssize_t nla_strscpy(char *dst, const struct nlattr *nla, size_t dstsize);
char *nla_strdup(const struct nlattr *nla, gfp_t flags);
int nla_memcpy(void *dest, const struct nlattr *src, int count);
int nla_memcmp(const struct nlattr *nla, const void *data, size_t size);
int nla_strcmp(const struct nlattr *nla, const char *str);
struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
int attrlen, int padattr);
void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype,
int attrlen, int padattr);
void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
const void *data);
void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
const void *data, int padattr);
void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
const void *data, int padattr);
int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
int nla_append(struct sk_buff *skb, int attrlen, const void *data);
# 541 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nlmsg_msg_size(int payload)
{
return ((int) ( ((sizeof(struct nlmsghdr))+4U -1) & ~(4U -1) )) + payload;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nlmsg_total_size(int payload)
{
return ( ((nlmsg_msg_size(payload))+4U -1) & ~(4U -1) );
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nlmsg_padlen(int payload)
{
return nlmsg_total_size(payload) - nlmsg_msg_size(payload);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *nlmsg_data(const struct nlmsghdr *nlh)
{
return (unsigned char *) nlh + ((int) ( ((sizeof(struct nlmsghdr))+4U -1) & ~(4U -1) ));
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nlmsg_len(const struct nlmsghdr *nlh)
{
return nlh->nlmsg_len - ((int) ( ((sizeof(struct nlmsghdr))+4U -1) & ~(4U -1) ));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct nlattr *nlmsg_attrdata(const struct nlmsghdr *nlh,
int hdrlen)
{
unsigned char *data = nlmsg_data(nlh);
return (struct nlattr *) (data + ( ((hdrlen)+4U -1) & ~(4U -1) ));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nlmsg_attrlen(const struct nlmsghdr *nlh, int hdrlen)
{
return nlmsg_len(nlh) - ( ((hdrlen)+4U -1) & ~(4U -1) );
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
{
return (remaining >= (int) sizeof(struct nlmsghdr) &&
nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
nlh->nlmsg_len <= remaining);
}
# 624 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct nlmsghdr *
nlmsg_next(const struct nlmsghdr *nlh, int *remaining)
{
int totlen = ( ((nlh->nlmsg_len)+4U -1) & ~(4U -1) );

*remaining -= totlen;

return (struct nlmsghdr *) ((unsigned char *) nlh + totlen);
}
# 650 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_parse(struct nlattr **tb, int maxtype,
const struct nlattr *head, int len,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nla_parse(tb, maxtype, head, len, policy,
(NL_VALIDATE_TRAILING | NL_VALIDATE_MAXTYPE | NL_VALIDATE_UNSPEC | NL_VALIDATE_STRICT_ATTRS | NL_VALIDATE_NESTED), extack);
}
# 675 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_parse_deprecated(struct nlattr **tb, int maxtype,
const struct nlattr *head, int len,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nla_parse(tb, maxtype, head, len, policy,
NL_VALIDATE_LIBERAL, extack);
}
# 700 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_parse_deprecated_strict(struct nlattr **tb, int maxtype,
const struct nlattr *head,
int len,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nla_parse(tb, maxtype, head, len, policy,
(NL_VALIDATE_TRAILING | NL_VALIDATE_MAXTYPE), extack);
}
# 722 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
struct nlattr *tb[], int maxtype,
const struct nla_policy *policy,
unsigned int validate,
struct netlink_ext_ack *extack)
{
if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) {
do { static const char __msg[] = "Invalid header length"; struct netlink_ext_ack *__extack = (extack); do_trace_netlink_extack(__msg); if (__extack) __extack->_msg = __msg; } while (0);
return -22;
}

return __nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
nlmsg_attrlen(nlh, hdrlen), policy, validate,
extack);
}
# 748 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
struct nlattr *tb[], int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
(NL_VALIDATE_TRAILING | NL_VALIDATE_MAXTYPE | NL_VALIDATE_UNSPEC | NL_VALIDATE_STRICT_ATTRS | NL_VALIDATE_NESTED), extack);
}
# 767 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nlmsg_parse_deprecated(const struct nlmsghdr *nlh, int hdrlen,
struct nlattr *tb[], int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
NL_VALIDATE_LIBERAL, extack);
}
# 786 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
nlmsg_parse_deprecated_strict(const struct nlmsghdr *nlh, int hdrlen,
struct nlattr *tb[], int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
(NL_VALIDATE_TRAILING | NL_VALIDATE_MAXTYPE), extack);
}
# 804 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
int hdrlen, int attrtype)
{
return nla_find(nlmsg_attrdata(nlh, hdrlen),
nlmsg_attrlen(nlh, hdrlen), attrtype);
}
# 826 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_validate_deprecated(const struct nlattr *head, int len,
int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nla_validate(head, len, maxtype, policy, NL_VALIDATE_LIBERAL,
extack);
}
# 849 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_validate(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nla_validate(head, len, maxtype, policy, (NL_VALIDATE_TRAILING | NL_VALIDATE_MAXTYPE | NL_VALIDATE_UNSPEC | NL_VALIDATE_STRICT_ATTRS | NL_VALIDATE_NESTED),
extack);
}
# 865 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nlmsg_validate_deprecated(const struct nlmsghdr *nlh,
int hdrlen, int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
return -22;

return __nla_validate(nlmsg_attrdata(nlh, hdrlen),
nlmsg_attrlen(nlh, hdrlen), maxtype,
policy, NL_VALIDATE_LIBERAL, extack);
}
# 886 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nlmsg_report(const struct nlmsghdr *nlh)
{
return nlh ? !!(nlh->nlmsg_flags & 0x08) : 0;
}
# 914 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
int type, int payload, int flags)
{
if (__builtin_expect(!!(skb_tailroom(skb) < nlmsg_total_size(payload)), 0))
return ((void *)0);

return __nlmsg_put(skb, portid, seq, type, payload, flags);
}
# 934 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct nlmsghdr *nlmsg_put_answer(struct sk_buff *skb,
struct netlink_callback *cb,
int type, int payload,
int flags)
{
return nlmsg_put(skb, (*(struct netlink_skb_parms*)&((cb->skb)->cb)).portid, cb->nlh->nlmsg_seq,
type, payload, flags);
}
# 951 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *nlmsg_new(size_t payload, gfp_t flags)
{
return alloc_skb(nlmsg_total_size(payload), flags);
}
# 965 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
{
nlh->nlmsg_len = skb_tail_pointer(skb) - (unsigned char *)nlh;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *nlmsg_get_pos(struct sk_buff *skb)
{
return skb_tail_pointer(skb);
}
# 988 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void nlmsg_trim(struct sk_buff *skb, const void *mark)
{
if (mark) {
({ int __ret_warn_on = !!((unsigned char *) mark < skb->data); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/netlink.h"), "i" (991), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
skb_trim(skb, (unsigned char *) mark - skb->data);
}
}
# 1004 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh)
{
nlmsg_trim(skb, nlh);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void nlmsg_free(struct sk_buff *skb)
{
kfree_skb(skb);
}
# 1026 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
u32 portid, unsigned int group, gfp_t flags)
{
int err;

(*(struct netlink_skb_parms*)&((skb)->cb)).dst_group = group;

err = netlink_broadcast(sk, skb, portid, group, flags);
if (err > 0)
err = 0;

return err;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 portid)
{
int err;

err = netlink_unicast(sk, skb, portid, 0x40);
if (err > 0)
err = 0;

return err;
}
# 1084 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
nl_dump_check_consistent(struct netlink_callback *cb,
struct nlmsghdr *nlh)
{
if (cb->prev_seq && cb->seq != cb->prev_seq)
nlh->nlmsg_flags |= 0x10;
cb->prev_seq = cb->seq;
}
# 1101 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_attr_size(int payload)
{
return ((int) (((sizeof(struct nlattr)) + 4 - 1) & ~(4 - 1))) + payload;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_total_size(int payload)
{
return (((nla_attr_size(payload)) + 4 - 1) & ~(4 - 1));
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_padlen(int payload)
{
return nla_total_size(payload) - nla_attr_size(payload);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_type(const struct nlattr *nla)
{
return nla->nla_type & ~((1 << 15) | (1 << 14));
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *nla_data(const struct nlattr *nla)
{
return (char *) nla + ((int) (((sizeof(struct nlattr)) + 4 - 1) & ~(4 - 1)));
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_len(const struct nlattr *nla)
{
return nla->nla_len - ((int) (((sizeof(struct nlattr)) + 4 - 1) & ~(4 - 1)));
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_ok(const struct nlattr *nla, int remaining)
{
return remaining >= (int) sizeof(*nla) &&
nla->nla_len >= sizeof(*nla) &&
nla->nla_len <= remaining;
}
# 1171 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
{
unsigned int totlen = (((nla->nla_len) + 4 - 1) & ~(4 - 1));

*remaining -= totlen;
return (struct nlattr *) ((char *) nla + totlen);
}
# 1186 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct nlattr *
nla_find_nested(const struct nlattr *nla, int attrtype)
{
return nla_find(nla_data(nla), nla_len(nla), attrtype);
}
# 1202 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_parse_nested(struct nlattr *tb[], int maxtype,
const struct nlattr *nla,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
if (!(nla->nla_type & (1 << 15))) {
do { static const char __msg[] = "NLA_F_NESTED is missing"; struct netlink_ext_ack *__extack = (extack); do_trace_netlink_extack(__msg); if (__extack) { __extack->_msg = __msg; __extack->bad_attr = (nla); __extack->policy = (((void *)0)); } } while (0);
return -22;
}

return __nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy,
(NL_VALIDATE_TRAILING | NL_VALIDATE_MAXTYPE | NL_VALIDATE_UNSPEC | NL_VALIDATE_STRICT_ATTRS | NL_VALIDATE_NESTED), extack);
}
# 1226 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_parse_nested_deprecated(struct nlattr *tb[], int maxtype,
const struct nlattr *nla,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy,
NL_VALIDATE_LIBERAL, extack);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
{

u8 tmp = value;

return nla_put(skb, attrtype, sizeof(u8), &tmp);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
{
u16 tmp = value;

return nla_put(skb, attrtype, sizeof(u16), &tmp);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
{
__be16 tmp = value;

return nla_put(skb, attrtype, sizeof(__be16), &tmp);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
{
__be16 tmp = value;

return nla_put_be16(skb, attrtype | (1 << 14), tmp);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
{
__le16 tmp = value;

return nla_put(skb, attrtype, sizeof(__le16), &tmp);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
{
u32 tmp = value;

return nla_put(skb, attrtype, sizeof(u32), &tmp);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
{
__be32 tmp = value;

return nla_put(skb, attrtype, sizeof(__be32), &tmp);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
{
__be32 tmp = value;

return nla_put_be32(skb, attrtype | (1 << 14), tmp);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
{
__le32 tmp = value;

return nla_put(skb, attrtype, sizeof(__le32), &tmp);
}
# 1360 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
u64 value, int padattr)
{
u64 tmp = value;

return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
}
# 1375 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
int padattr)
{
__be64 tmp = value;

return nla_put_64bit(skb, attrtype, sizeof(__be64), &tmp, padattr);
}
# 1390 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
int padattr)
{
__be64 tmp = value;

return nla_put_be64(skb, attrtype | (1 << 14), tmp,
padattr);
}
# 1406 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
int padattr)
{
__le64 tmp = value;

return nla_put_64bit(skb, attrtype, sizeof(__le64), &tmp, padattr);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
{
s8 tmp = value;

return nla_put(skb, attrtype, sizeof(s8), &tmp);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
{
s16 tmp = value;

return nla_put(skb, attrtype, sizeof(s16), &tmp);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
{
s32 tmp = value;

return nla_put(skb, attrtype, sizeof(s32), &tmp);
}
# 1460 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
int padattr)
{
s64 tmp = value;

return nla_put_64bit(skb, attrtype, sizeof(s64), &tmp, padattr);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_string(struct sk_buff *skb, int attrtype,
const char *str)
{
return nla_put(skb, attrtype, strlen(str) + 1, str);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_flag(struct sk_buff *skb, int attrtype)
{
return nla_put(skb, attrtype, 0, ((void *)0));
}
# 1497 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_msecs(struct sk_buff *skb, int attrtype,
unsigned long njiffies, int padattr)
{
u64 tmp = jiffies_to_msecs(njiffies);

return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
}
# 1512 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_in_addr(struct sk_buff *skb, int attrtype,
__be32 addr)
{
__be32 tmp = addr;

return nla_put_be32(skb, attrtype, tmp);
}
# 1527 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_in6_addr(struct sk_buff *skb, int attrtype,
const struct in6_addr *addr)
{
return nla_put(skb, attrtype, sizeof(*addr), addr);
}
# 1540 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_put_bitfield32(struct sk_buff *skb, int attrtype,
__u32 value, __u32 selector)
{
struct nla_bitfield32 tmp = { value, selector, };

return nla_put(skb, attrtype, sizeof(tmp), &tmp);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 nla_get_u32(const struct nlattr *nla)
{
return *(u32 *) nla_data(nla);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be32 nla_get_be32(const struct nlattr *nla)
{
return *(__be32 *) nla_data(nla);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __le32 nla_get_le32(const struct nlattr *nla)
{
return *(__le32 *) nla_data(nla);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u16 nla_get_u16(const struct nlattr *nla)
{
return *(u16 *) nla_data(nla);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be16 nla_get_be16(const struct nlattr *nla)
{
return *(__be16 *) nla_data(nla);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __le16 nla_get_le16(const struct nlattr *nla)
{
return *(__le16 *) nla_data(nla);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u8 nla_get_u8(const struct nlattr *nla)
{
return *(u8 *) nla_data(nla);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 nla_get_u64(const struct nlattr *nla)
{
u64 tmp;

nla_memcpy(&tmp, nla, sizeof(tmp));

return tmp;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be64 nla_get_be64(const struct nlattr *nla)
{
__be64 tmp;

nla_memcpy(&tmp, nla, sizeof(tmp));

return tmp;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __le64 nla_get_le64(const struct nlattr *nla)
{
return *(__le64 *) nla_data(nla);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s32 nla_get_s32(const struct nlattr *nla)
{
return *(s32 *) nla_data(nla);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s16 nla_get_s16(const struct nlattr *nla)
{
return *(s16 *) nla_data(nla);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s8 nla_get_s8(const struct nlattr *nla)
{
return *(s8 *) nla_data(nla);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s64 nla_get_s64(const struct nlattr *nla)
{
s64 tmp;

nla_memcpy(&tmp, nla, sizeof(tmp));

return tmp;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_get_flag(const struct nlattr *nla)
{
return !!nla;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long nla_get_msecs(const struct nlattr *nla)
{
u64 msecs = nla_get_u64(nla);

return msecs_to_jiffies((unsigned long) msecs);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be32 nla_get_in_addr(const struct nlattr *nla)
{
return *(__be32 *) nla_data(nla);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct in6_addr nla_get_in6_addr(const struct nlattr *nla)
{
struct in6_addr tmp;

nla_memcpy(&tmp, nla, sizeof(tmp));
return tmp;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct nla_bitfield32 nla_get_bitfield32(const struct nlattr *nla)
{
struct nla_bitfield32 tmp;

nla_memcpy(&tmp, nla, sizeof(tmp));
return tmp;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *nla_memdup(const struct nlattr *src, gfp_t gfp)
{
return kmemdup(nla_data(src), nla_len(src), gfp);
}
# 1762 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct nlattr *nla_nest_start_noflag(struct sk_buff *skb,
int attrtype)
{
struct nlattr *start = (struct nlattr *)skb_tail_pointer(skb);

if (nla_put(skb, attrtype, 0, ((void *)0)) < 0)
return ((void *)0);

return start;
}
# 1783 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
{
return nla_nest_start_noflag(skb, attrtype | (1 << 15));
}
# 1798 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_nest_end(struct sk_buff *skb, struct nlattr *start)
{
start->nla_len = skb_tail_pointer(skb) - (unsigned char *)start;
return skb->len;
}
# 1812 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
{
nlmsg_trim(skb, start);
}
# 1831 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __nla_validate_nested(const struct nlattr *start, int maxtype,
const struct nla_policy *policy,
unsigned int validate,
struct netlink_ext_ack *extack)
{
return __nla_validate(nla_data(start), nla_len(start), maxtype, policy,
validate, extack);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
nla_validate_nested(const struct nlattr *start, int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nla_validate_nested(start, maxtype, policy,
(NL_VALIDATE_TRAILING | NL_VALIDATE_MAXTYPE | NL_VALIDATE_UNSPEC | NL_VALIDATE_STRICT_ATTRS | NL_VALIDATE_NESTED), extack);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
nla_validate_nested_deprecated(const struct nlattr *start, int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
return __nla_validate_nested(start, maxtype, policy,
NL_VALIDATE_LIBERAL, extack);
}
# 1865 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool nla_need_padding_for_64bit(struct sk_buff *skb)
{






if (((((unsigned long)skb_tail_pointer(skb)) & ((typeof((unsigned long)skb_tail_pointer(skb)))(8) - 1)) == 0))
return true;

return false;
}
# 1891 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_align_64bit(struct sk_buff *skb, int padattr)
{
if (nla_need_padding_for_64bit(skb) &&
!nla_reserve(skb, padattr, 0))
return -90;

return 0;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nla_total_size_64bit(int payload)
{
return (((nla_attr_size(payload)) + 4 - 1) & ~(4 - 1))

+ (((nla_attr_size(0)) + 4 - 1) & ~(4 - 1))

;
}
# 1939 "./include/net/netlink.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool nla_is_last(const struct nlattr *nla, int rem)
{
return nla->nla_len == rem;
}

void nla_get_range_unsigned(const struct nla_policy *pt,
struct netlink_range_validation *range);
void nla_get_range_signed(const struct nla_policy *pt,
struct netlink_range_validation_signed *range);

struct netlink_policy_dump_state;

int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate,
const struct nla_policy *policy,
unsigned int maxtype);
int netlink_policy_dump_get_policy_idx(struct netlink_policy_dump_state *state,
const struct nla_policy *policy,
unsigned int maxtype);
bool netlink_policy_dump_loop(struct netlink_policy_dump_state *state);
int netlink_policy_dump_write(struct sk_buff *skb,
struct netlink_policy_dump_state *state);
int netlink_policy_dump_attr_size_estimate(const struct nla_policy *pt);
int netlink_policy_dump_write_attr(struct sk_buff *skb,
const struct nla_policy *pt,
int nestattr);
void netlink_policy_dump_free(struct netlink_policy_dump_state *state);
# 7 "./include/net/rtnetlink.h" 2

typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *,
struct netlink_ext_ack *);
typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);

enum rtnl_link_flags {
RTNL_FLAG_DOIT_UNLOCKED = 1,
};

void rtnl_register(int protocol, int msgtype,
rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
int rtnl_register_module(struct module *owner, int protocol, int msgtype,
rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
int rtnl_unregister(int protocol, int msgtype);
void rtnl_unregister_all(int protocol);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int rtnl_msg_family(const struct nlmsghdr *nlh)
{
if (nlmsg_len(nlh) >= sizeof(struct rtgenmsg))
return ((struct rtgenmsg *) nlmsg_data(nlh))->rtgen_family;
else
return 0;
}
# 63 "./include/net/rtnetlink.h"
struct rtnl_link_ops {
struct list_head list;

const char *kind;

size_t priv_size;
struct net_device *(*alloc)(struct nlattr *tb[],
const char *ifname,
unsigned char name_assign_type,
unsigned int num_tx_queues,
unsigned int num_rx_queues);
void (*setup)(struct net_device *dev);

bool netns_refund;
unsigned int maxtype;
const struct nla_policy *policy;
int (*validate)(struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack);

int (*newlink)(struct net *src_net,
struct net_device *dev,
struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack);
int (*changelink)(struct net_device *dev,
struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack);
void (*dellink)(struct net_device *dev,
struct list_head *head);

size_t (*get_size)(const struct net_device *dev);
int (*fill_info)(struct sk_buff *skb,
const struct net_device *dev);

size_t (*get_xstats_size)(const struct net_device *dev);
int (*fill_xstats)(struct sk_buff *skb,
const struct net_device *dev);
unsigned int (*get_num_tx_queues)(void);
unsigned int (*get_num_rx_queues)(void);

unsigned int slave_maxtype;
const struct nla_policy *slave_policy;
int (*slave_changelink)(struct net_device *dev,
struct net_device *slave_dev,
struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack);
size_t (*get_slave_size)(const struct net_device *dev,
const struct net_device *slave_dev);
int (*fill_slave_info)(struct sk_buff *skb,
const struct net_device *dev,
const struct net_device *slave_dev);
struct net *(*get_link_net)(const struct net_device *dev);
size_t (*get_linkxstats_size)(const struct net_device *dev,
int attr);
int (*fill_linkxstats)(struct sk_buff *skb,
const struct net_device *dev,
int *prividx, int attr);
};

int __rtnl_link_register(struct rtnl_link_ops *ops);
void __rtnl_link_unregister(struct rtnl_link_ops *ops);

int rtnl_link_register(struct rtnl_link_ops *ops);
void rtnl_link_unregister(struct rtnl_link_ops *ops);
# 145 "./include/net/rtnetlink.h"
struct rtnl_af_ops {
struct list_head list;
int family;

int (*fill_link_af)(struct sk_buff *skb,
const struct net_device *dev,
u32 ext_filter_mask);
size_t (*get_link_af_size)(const struct net_device *dev,
u32 ext_filter_mask);

int (*validate_link_af)(const struct net_device *dev,
const struct nlattr *attr,
struct netlink_ext_ack *extack);
int (*set_link_af)(struct net_device *dev,
const struct nlattr *attr,
struct netlink_ext_ack *extack);
int (*fill_stats_af)(struct sk_buff *skb,
const struct net_device *dev);
size_t (*get_stats_af_size)(const struct net_device *dev);
};

void rtnl_af_register(struct rtnl_af_ops *ops);
void rtnl_af_unregister(struct rtnl_af_ops *ops);

struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
struct net_device *rtnl_create_link(struct net *net, const char *ifname,
unsigned char name_assign_type,
const struct rtnl_link_ops *ops,
struct nlattr *tb[],
struct netlink_ext_ack *extack);
int rtnl_delete_link(struct net_device *dev);
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);

int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
struct netlink_ext_ack *exterr);
struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid);
# 32 "./include/net/neighbour.h" 2
# 41 "./include/net/neighbour.h"
struct neighbour;

enum {
NEIGH_VAR_MCAST_PROBES,
NEIGH_VAR_UCAST_PROBES,
NEIGH_VAR_APP_PROBES,
NEIGH_VAR_MCAST_REPROBES,
NEIGH_VAR_RETRANS_TIME,
NEIGH_VAR_BASE_REACHABLE_TIME,
NEIGH_VAR_DELAY_PROBE_TIME,
NEIGH_VAR_GC_STALETIME,
NEIGH_VAR_QUEUE_LEN_BYTES,
NEIGH_VAR_PROXY_QLEN,
NEIGH_VAR_ANYCAST_DELAY,
NEIGH_VAR_PROXY_DELAY,
NEIGH_VAR_LOCKTIME,


NEIGH_VAR_QUEUE_LEN,
NEIGH_VAR_RETRANS_TIME_MS,
NEIGH_VAR_BASE_REACHABLE_TIME_MS,

NEIGH_VAR_GC_INTERVAL,
NEIGH_VAR_GC_THRESH1,
NEIGH_VAR_GC_THRESH2,
NEIGH_VAR_GC_THRESH3,
NEIGH_VAR_MAX
};

struct neigh_parms {
possible_net_t net;
struct net_device *dev;
netdevice_tracker dev_tracker;
struct list_head list;
int (*neigh_setup)(struct neighbour *);
struct neigh_table *tbl;

void *sysctl_table;

int dead;
refcount_t refcnt;
struct callback_head callback_head;

int reachable_time;
int data[(NEIGH_VAR_LOCKTIME + 1)];
unsigned long data_state[((((NEIGH_VAR_LOCKTIME + 1)) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))];
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void neigh_var_set(struct neigh_parms *p, int index, int val)
{
set_bit(index, p->data_state);
p->data[index] = val;
}
# 103 "./include/net/neighbour.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void neigh_parms_data_state_setall(struct neigh_parms *p)
{
bitmap_fill(p->data_state, (NEIGH_VAR_LOCKTIME + 1));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void neigh_parms_data_state_cleanall(struct neigh_parms *p)
{
bitmap_zero(p->data_state, (NEIGH_VAR_LOCKTIME + 1));
}

struct neigh_statistics {
unsigned long allocs;
unsigned long destroys;
unsigned long hash_grows;

unsigned long res_failed;

unsigned long lookups;
unsigned long hits;

unsigned long rcv_probes_mcast;
unsigned long rcv_probes_ucast;

unsigned long periodic_gc_runs;
unsigned long forced_gc_runs;

unsigned long unres_discards;
unsigned long table_fulls;
};



struct neighbour {
struct neighbour *next;
struct neigh_table *tbl;
struct neigh_parms *parms;
unsigned long confirmed;
unsigned long updated;
rwlock_t lock;
refcount_t refcnt;
unsigned int arp_queue_len_bytes;
struct sk_buff_head arp_queue;
struct timer_list timer;
unsigned long used;
atomic_t probes;
u8 nud_state;
u8 type;
u8 dead;
u8 protocol;
u32 flags;
seqlock_t ha_lock;
unsigned char ha[((((32)) + ((typeof((32)))((sizeof(unsigned long))) - 1)) & ~((typeof((32)))((sizeof(unsigned long))) - 1))] __attribute__((__aligned__(8)));
struct hh_cache hh;
int (*output)(struct neighbour *, struct sk_buff *);
const struct neigh_ops *ops;
struct list_head gc_list;
struct list_head managed_list;
struct callback_head rcu;
struct net_device *dev;
netdevice_tracker dev_tracker;
u8 primary_key[0];
};

struct neigh_ops {
int family;
void (*solicit)(struct neighbour *, struct sk_buff *);
void (*error_report)(struct neighbour *, struct sk_buff *);
int (*output)(struct neighbour *, struct sk_buff *);
int (*connected_output)(struct neighbour *, struct sk_buff *);
};

struct pneigh_entry {
struct pneigh_entry *next;
possible_net_t net;
struct net_device *dev;
netdevice_tracker dev_tracker;
u32 flags;
u8 protocol;
u8 key[];
};







struct neigh_hash_table {
struct neighbour **hash_buckets;
unsigned int hash_shift;
__u32 hash_rnd[4];
struct callback_head rcu;
};


struct neigh_table {
int family;
unsigned int entry_size;
unsigned int key_len;
__be16 protocol;
__u32 (*hash)(const void *pkey,
const struct net_device *dev,
__u32 *hash_rnd);
bool (*key_eq)(const struct neighbour *, const void *pkey);
int (*constructor)(struct neighbour *);
int (*pconstructor)(struct pneigh_entry *);
void (*pdestructor)(struct pneigh_entry *);
void (*proxy_redo)(struct sk_buff *skb);
int (*is_multicast)(const void *pkey);
bool (*allow_add)(const struct net_device *dev,
struct netlink_ext_ack *extack);
char *id;
struct neigh_parms parms;
struct list_head parms_list;
int gc_interval;
int gc_thresh1;
int gc_thresh2;
int gc_thresh3;
unsigned long last_flush;
struct delayed_work gc_work;
struct delayed_work managed_work;
struct timer_list proxy_timer;
struct sk_buff_head proxy_queue;
atomic_t entries;
atomic_t gc_entries;
struct list_head gc_list;
struct list_head managed_list;
rwlock_t lock;
unsigned long last_rand;
struct neigh_statistics *stats;
struct neigh_hash_table *nht;
struct pneigh_entry **phash_buckets;
};

enum {
NEIGH_ARP_TABLE = 0,
NEIGH_ND_TABLE = 1,
NEIGH_DN_TABLE = 2,
NEIGH_NR_TABLES,
NEIGH_LINK_TABLE = NEIGH_NR_TABLES
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int neigh_parms_family(struct neigh_parms *p)
{
return p->tbl->family;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *neighbour_priv(const struct neighbour *n)
{
return (char *)n + n->tbl->entry_size;
}
# 275 "./include/net/neighbour.h"
extern const struct nla_policy nda_policy[];

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool neigh_key_eq16(const struct neighbour *n, const void *pkey)
{
return *(const u16 *)n->primary_key == *(const u16 *)pkey;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
{
return *(const u32 *)n->primary_key == *(const u32 *)pkey;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool neigh_key_eq128(const struct neighbour *n, const void *pkey)
{
const u32 *n32 = (const u32 *)n->primary_key;
const u32 *p32 = pkey;

return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
(n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct neighbour *___neigh_lookup_noref(
struct neigh_table *tbl,
bool (*key_eq)(const struct neighbour *n, const void *pkey),
__u32 (*hash)(const void *pkey,
const struct net_device *dev,
__u32 *hash_rnd),
const void *pkey,
struct net_device *dev)
{
struct neigh_hash_table *nht = ({ typeof(*(tbl->nht)) *__UNIQUE_ID_rcu352 = (typeof(*(tbl->nht)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_353(void) ; if (!((sizeof((tbl->nht)) == sizeof(char) || sizeof((tbl->nht)) == sizeof(short) || sizeof((tbl->nht)) == sizeof(int) || sizeof((tbl->nht)) == sizeof(long)) || sizeof((tbl->nht)) == sizeof(long long))) __compiletime_assert_353(); } while (0); (*(const volatile typeof( _Generic(((tbl->nht)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((tbl->nht)))) *)&((tbl->nht))); }); do { } while (0 && (!((0) || rcu_read_lock_bh_held()))); ; ((typeof(*(tbl->nht)) *)(__UNIQUE_ID_rcu352)); });
struct neighbour *n;
u32 hash_val;

hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
for (n = ({ typeof(*(nht->hash_buckets[hash_val])) *__UNIQUE_ID_rcu354 = (typeof(*(nht->hash_buckets[hash_val])) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_355(void) ; if (!((sizeof((nht->hash_buckets[hash_val])) == sizeof(char) || sizeof((nht->hash_buckets[hash_val])) == sizeof(short) || sizeof((nht->hash_buckets[hash_val])) == sizeof(int) || sizeof((nht->hash_buckets[hash_val])) == sizeof(long)) || sizeof((nht->hash_buckets[hash_val])) == sizeof(long long))) __compiletime_assert_355(); } while (0); (*(const volatile typeof( _Generic(((nht->hash_buckets[hash_val])), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nht->hash_buckets[hash_val])))) *)&((nht->hash_buckets[hash_val]))); }); do { } while (0 && (!((0) || rcu_read_lock_bh_held()))); ; ((typeof(*(nht->hash_buckets[hash_val])) *)(__UNIQUE_ID_rcu354)); });
n != ((void *)0);
n = ({ typeof(*(n->next)) *__UNIQUE_ID_rcu356 = (typeof(*(n->next)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_357(void) ; if (!((sizeof((n->next)) == sizeof(char) || sizeof((n->next)) == sizeof(short) || sizeof((n->next)) == sizeof(int) || sizeof((n->next)) == sizeof(long)) || sizeof((n->next)) == sizeof(long long))) __compiletime_assert_357(); } while (0); (*(const volatile typeof( _Generic(((n->next)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((n->next)))) *)&((n->next))); }); do { } while (0 && (!((0) || rcu_read_lock_bh_held()))); ; ((typeof(*(n->next)) *)(__UNIQUE_ID_rcu356)); })) {
if (n->dev == dev && key_eq(n, pkey))
return n;
}

return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl,
const void *pkey,
struct net_device *dev)
{
return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void neigh_confirm(struct neighbour *n)
{
if (n) {
unsigned long now = jiffies;


if (({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_358(void) ; if (!((sizeof(n->confirmed) == sizeof(char) || sizeof(n->confirmed) == sizeof(short) || sizeof(n->confirmed) == sizeof(int) || sizeof(n->confirmed) == sizeof(long)) || sizeof(n->confirmed) == sizeof(long long))) __compiletime_assert_358(); } while (0); (*(const volatile typeof( _Generic((n->confirmed), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (n->confirmed))) *)&(n->confirmed)); }) != now)
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_359(void) ; if (!((sizeof(n->confirmed) == sizeof(char) || sizeof(n->confirmed) == sizeof(short) || sizeof(n->confirmed) == sizeof(int) || sizeof(n->confirmed) == sizeof(long)) || sizeof(n->confirmed) == sizeof(long long))) __compiletime_assert_359(); } while (0); do { *(volatile typeof(n->confirmed) *)&(n->confirmed) = (now); } while (0); } while (0);
}
}

void neigh_table_init(int index, struct neigh_table *tbl);
int neigh_table_clear(int index, struct neigh_table *tbl);
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
struct net_device *dev);
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
const void *pkey);
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
struct net_device *dev, bool want_ref);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct neighbour *neigh_create(struct neigh_table *tbl,
const void *pkey,
struct net_device *dev)
{
return __neigh_create(tbl, pkey, dev, true);
}
void neigh_destroy(struct neighbour *neigh);
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
const bool immediate_ok);
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
u32 nlmsg_pid);
void __neigh_set_probe_once(struct neighbour *neigh);
bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev);
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
u8 *lladdr, void *saddr,
struct net_device *dev);

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
struct neigh_table *tbl);
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct net *neigh_parms_net(const struct neigh_parms *parms)
{
return read_pnet(&parms->net);
}

unsigned long neigh_rand_reach_time(unsigned long base);

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
struct sk_buff *skb);
struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net,
const void *key, struct net_device *dev,
int creat);
struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net,
const void *key, struct net_device *dev);
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key,
struct net_device *dev);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net *pneigh_net(const struct pneigh_entry *pneigh)
{
return read_pnet(&pneigh->net);
}

void neigh_app_ns(struct neighbour *n);
void neigh_for_each(struct neigh_table *tbl,
void (*cb)(struct neighbour *, void *), void *cookie);
void __neigh_for_each_release(struct neigh_table *tbl,
int (*cb)(struct neighbour *));
int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *);
void pneigh_for_each(struct neigh_table *tbl,
void (*cb)(struct pneigh_entry *));

struct neigh_seq_state {
struct seq_net_private p;
struct neigh_table *tbl;
struct neigh_hash_table *nht;
void *(*neigh_sub_iter)(struct neigh_seq_state *state,
struct neighbour *n, loff_t *pos);
unsigned int bucket;
unsigned int flags;



};
void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
unsigned int);
void *neigh_seq_next(struct seq_file *, void *, loff_t *);
void neigh_seq_stop(struct seq_file *, void *);

int neigh_proc_dointvec(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
void *buffer,
size_t *lenp, loff_t *ppos);
int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);

int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
proc_handler *proc_handler);
void neigh_sysctl_unregister(struct neigh_parms *p);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __neigh_parms_put(struct neigh_parms *parms)
{
refcount_dec(&parms->refcnt);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
{
refcount_inc(&parms->refcnt);
return parms;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void neigh_release(struct neighbour *neigh)
{
if (refcount_dec_and_test(&neigh->refcnt))
neigh_destroy(neigh);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct neighbour * neigh_clone(struct neighbour *neigh)
{
if (neigh)
refcount_inc(&neigh->refcnt);
return neigh;
}
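/* In neigh_event_send_probe() below, (0x80|0x40|0x02) is NUD_CONNECTED
 * (NUD_PERMANENT | NUD_NOARP | NUD_REACHABLE) and 0x08 / 0x10 are NUD_DELAY
 * and NUD_PROBE; a probe is triggered only outside those states. */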



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int neigh_event_send_probe(struct neighbour *neigh,
struct sk_buff *skb,
const bool immediate_ok)
{
unsigned long now = jiffies;

if (({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_360(void) ; if (!((sizeof(neigh->used) == sizeof(char) || sizeof(neigh->used) == sizeof(short) || sizeof(neigh->used) == sizeof(int) || sizeof(neigh->used) == sizeof(long)) || sizeof(neigh->used) == sizeof(long long))) __compiletime_assert_360(); } while (0); (*(const volatile typeof( _Generic((neigh->used), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (neigh->used))) *)&(neigh->used)); }) != now)
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_361(void) ; if (!((sizeof(neigh->used) == sizeof(char) || sizeof(neigh->used) == sizeof(short) || sizeof(neigh->used) == sizeof(int) || sizeof(neigh->used) == sizeof(long)) || sizeof(neigh->used) == sizeof(long long))) __compiletime_assert_361(); } while (0); do { *(volatile typeof(neigh->used) *)&(neigh->used) = (now); } while (0); } while (0);
if (!(neigh->nud_state & ((0x80|0x40|0x02) | 0x08 | 0x10)))
return __neigh_event_send(neigh, skb, immediate_ok);
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
return neigh_event_send_probe(neigh, skb, true);
}
# 496 "./include/net/neighbour.h"
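/*
 * Lines of the form `# <line> "<file>"` are preprocessor line markers
 * recording where the following text sits in the original headers.  The runs
 * of blank lines are where comments stood before -E stripped them.
 */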
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
{
unsigned int hh_alen = 0;
unsigned int seq;
unsigned int hh_len;
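/* Seqlock read loop: re-read hh_len and re-copy the cached hardware header
 * until read_seqretry() confirms no writer raced with the reader. */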

do {
seq = read_seqbegin(&hh->hh_lock);
hh_len = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_362(void) ; if (!((sizeof(hh->hh_len) == sizeof(char) || sizeof(hh->hh_len) == sizeof(short) || sizeof(hh->hh_len) == sizeof(int) || sizeof(hh->hh_len) == sizeof(long)) || sizeof(hh->hh_len) == sizeof(long long))) __compiletime_assert_362(); } while (0); (*(const volatile typeof( _Generic((hh->hh_len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (hh->hh_len))) *)&(hh->hh_len)); });
if (__builtin_expect(!!(hh_len <= 16), 1)) {
hh_alen = 16;





if (__builtin_expect(!!(skb_headroom(skb) >= 16), 1)) {

memcpy(skb->data - 16, hh->hh_data,
16);
}
} else {
hh_alen = (((hh_len)+(16 -1))&~(16 - 1));

if (__builtin_expect(!!(skb_headroom(skb) >= hh_alen), 1)) {
memcpy(skb->data - hh_alen, hh->hh_data,
hh_alen);
}
}
} while (read_seqretry(&hh->hh_lock, seq));
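/*
 * The ({ int __ret_warn_on ... "ebreak" ... __bug_table ... }) block below is
 * WARN_ON() on RISC-V: an ebreak trap plus a __bug_table entry recording
 * file/line for the report.  The same expansion appears wherever the original
 * source used WARN_ON()/WARN_ON_ONCE()/BUG().
 */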

if (({ int __ret_warn_on = !!(skb_headroom(skb) < hh_alen); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/neighbour.h"), "i" (527), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); })) {
kfree_skb(skb);
return 0x01;
}

__skb_push(skb, hh_len);
return dev_queue_xmit(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int neigh_output(struct neighbour *n, struct sk_buff *skb,
bool skip_cache)
{
const struct hh_cache *hh = &n->hh;




if (!skip_cache &&
(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_363(void) ; if (!((sizeof(n->nud_state) == sizeof(char) || sizeof(n->nud_state) == sizeof(short) || sizeof(n->nud_state) == sizeof(int) || sizeof(n->nud_state) == sizeof(long)) || sizeof(n->nud_state) == sizeof(long long))) __compiletime_assert_363(); } while (0); (*(const volatile typeof( _Generic((n->nud_state), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (n->nud_state))) *)&(n->nud_state)); }) & (0x80|0x40|0x02)) &&
({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_364(void) ; if (!((sizeof(hh->hh_len) == sizeof(char) || sizeof(hh->hh_len) == sizeof(short) || sizeof(hh->hh_len) == sizeof(int) || sizeof(hh->hh_len) == sizeof(long)) || sizeof(hh->hh_len) == sizeof(long long))) __compiletime_assert_364(); } while (0); (*(const volatile typeof( _Generic((hh->hh_len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (hh->hh_len))) *)&(hh->hh_len)); }))
return neigh_hh_output(hh, skb);

return n->output(n, skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct neighbour *
__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat)
{
struct neighbour *n = neigh_lookup(tbl, pkey, dev);

if (n || !creat)
return n;

n = neigh_create(tbl, pkey, dev);
return IS_ERR(n) ? ((void *)0) : n;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct neighbour *
__neigh_lookup_errno(struct neigh_table *tbl, const void *pkey,
struct net_device *dev)
{
struct neighbour *n = neigh_lookup(tbl, pkey, dev);

if (n)
return n;

return neigh_create(tbl, pkey, dev);
}

struct neighbour_cb {
unsigned long sched_next;
unsigned int flags;
};





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void neigh_ha_snapshot(char *dst, const struct neighbour *n,
const struct net_device *dev)
{
unsigned int seq;

do {
seq = read_seqbegin(&n->ha_lock);
memcpy(dst, n->ha, dev->addr_len);
} while (read_seqretry(&n->ha_lock, seq));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void neigh_update_is_router(struct neighbour *neigh, u32 flags,
int *notify)
{
u8 ndm_flags = 0;

ndm_flags |= (flags & ((((1UL))) << (6))) ? (1 << 7) : 0;
if ((neigh->flags ^ ndm_flags) & (1 << 7)) {
if (ndm_flags & (1 << 7))
neigh->flags |= (1 << 7);
else
neigh->flags &= ~(1 << 7);
*notify = 1;
}
}
# 20 "./include/net/dst.h" 2



struct sk_buff;

struct dst_entry {
struct net_device *dev;
struct dst_ops *ops;
unsigned long _metrics;
unsigned long expires;



void *__pad1;

int (*input)(struct sk_buff *);
int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);

unsigned short flags;
# 55 "./include/net/dst.h"
short obsolete;




unsigned short header_len;
unsigned short trailer_len;






atomic_t __refcnt;

int __use;
unsigned long lastuse;
struct lwtunnel_state *lwtstate;
struct callback_head callback_head;
short error;
short __pad;
__u32 tclassid;



netdevice_tracker dev_tracker;
};

struct dst_metrics {
u32 metrics[(__RTAX_MAX - 1)];
refcount_t refcnt;
} __attribute__((__aligned__(4)));
extern const struct dst_metrics dst_default_metrics;

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
# 98 "./include/net/dst.h"
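/*
 * dst->_metrics is a tagged pointer: the address of a u32 metrics array with
 * flags in the low bits (0x1UL marks the array read-only), hence the
 * `& ~0x3UL` masking before dereference in the helpers below.
 */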
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dst_metrics_read_only(const struct dst_entry *dst)
{
return dst->_metrics & 0x1UL;
}

void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dst_destroy_metrics_generic(struct dst_entry *dst)
{
unsigned long val = dst->_metrics;
if (!(val & 0x1UL))
__dst_destroy_metrics_generic(dst, val);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 *dst_metrics_write_ptr(struct dst_entry *dst)
{
unsigned long p = dst->_metrics;

do { if (__builtin_expect(!!(!p), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/dst.h"), "i" (116), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);

if (p & 0x1UL)
return dst->ops->cow_metrics(dst, p);
return ((u32 *)((p) & ~0x3UL));
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dst_init_metrics(struct dst_entry *dst,
const u32 *src_metrics,
bool read_only)
{
dst->_metrics = ((unsigned long) src_metrics) |
(read_only ? 0x1UL : 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
{
u32 *dst_metrics = dst_metrics_write_ptr(dest);

if (dst_metrics) {
u32 *src_metrics = ((u32 *)(((src)->_metrics) & ~0x3UL));

memcpy(dst_metrics, src_metrics, (__RTAX_MAX - 1) * sizeof(u32));
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 *dst_metrics_ptr(struct dst_entry *dst)
{
return ((u32 *)(((dst)->_metrics) & ~0x3UL));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
u32 *p = ((u32 *)(((dst)->_metrics) & ~0x3UL));

return p[metric-1];
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32
dst_metric(const struct dst_entry *dst, const int metric)
{
({ int __ret_warn_on = !!(metric == RTAX_HOPLIMIT || metric == RTAX_ADVMSS || metric == RTAX_MTU); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/dst.h"), "i" (163), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });


return dst_metric_raw(dst, metric);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32
dst_metric_advmss(const struct dst_entry *dst)
{
u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);

if (!advmss)
advmss = dst->ops->default_advmss(dst);

return advmss;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
u32 *p = dst_metrics_write_ptr(dst);

if (p)
p[metric-1] = val;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
return dst_metric(dst, RTAX_FEATURES) & feature;
}
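/*
 * The bare semicolons here and before dst_output()/dst_input()/dst_check()
 * are most likely empty expansions of INDIRECT_CALLABLE_DECLARE() for the
 * IPv4/IPv6 helpers, compiled out in this configuration.
 */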

;
;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 dst_mtu(const struct dst_entry *dst)
{
return dst->ops->mtu(dst);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
return msecs_to_jiffies(dst_metric(dst, metric));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32
dst_allfrag(const struct dst_entry *dst)
{
int ret = dst_feature(dst, (1 << 3));
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
return dst_metric(dst, RTAX_LOCK) & (1 << metric);
}
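/*
 * dst_hold() below: the first statement is a BUILD_BUG_ON() keeping __refcnt
 * 64-byte aligned (its own cache line); the second wraps
 * atomic_inc_not_zero() in WARN_ON(), since taking a reference must never
 * observe a zero refcount.
 */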

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dst_hold(struct dst_entry *dst)
{




do { __attribute__((__noreturn__)) extern void __compiletime_assert_365(void) ; if (!(!(__builtin_offsetof(struct dst_entry, __refcnt) & 63))) __compiletime_assert_365(); } while (0);
({ int __ret_warn_on = !!(atomic_inc_not_zero(&dst->__refcnt) == 0); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/dst.h"), "i" (231), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
if (__builtin_expect(!!(time != dst->lastuse), 0)) {
dst->__use++;
dst->lastuse = time;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dst_hold_and_use(struct dst_entry *dst, unsigned long time)
{
dst_hold(dst);
dst_use_noref(dst, time);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct dst_entry *dst_clone(struct dst_entry *dst)
{
if (dst)
dst_hold(dst);
return dst;
}

void dst_release(struct dst_entry *dst);

void dst_release_immediate(struct dst_entry *dst);
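/*
 * skb->_skb_refdst uses the same low-bit tagging as _metrics: bit 0
 * (SKB_DST_NOREF in the unpreprocessed header) marks a dst borrowed under RCU
 * without a reference, which is why refdst_drop() and skb_dst_force() test
 * `& 1UL`.
 */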

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void refdst_drop(unsigned long refdst)
{
if (!(refdst & 1UL))
dst_release((struct dst_entry *)(refdst & ~(1UL)));
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_dst_drop(struct sk_buff *skb)
{
if (skb->_skb_refdst) {
refdst_drop(skb->_skb_refdst);
skb->_skb_refdst = 0UL;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
{
nskb->slow_gro |= !!refdst;
nskb->_skb_refdst = refdst;
if (!(nskb->_skb_refdst & 1UL))
dst_clone(skb_dst(nskb));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
__skb_dst_copy(nskb, oskb->_skb_refdst);
}
# 299 "./include/net/dst.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dst_hold_safe(struct dst_entry *dst)
{
return atomic_inc_not_zero(&dst->__refcnt);
}
# 311 "./include/net/dst.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_dst_force(struct sk_buff *skb)
{
if (skb_dst_is_noref(skb)) {
struct dst_entry *dst = skb_dst(skb);

({ int __ret_warn_on = !!(!rcu_read_lock_held()); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/dst.h"), "i" (316), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
if (!dst_hold_safe(dst))
dst = ((void *)0);

skb->_skb_refdst = (unsigned long)dst;
skb->slow_gro |= !!dst;
}

return skb->_skb_refdst != 0UL;
}
# 337 "./include/net/dst.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
struct net *net)
{
skb->dev = dev;






skb_clear_hash_if_not_l4(skb);
skb_set_queue_mapping(skb, 0);
skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}
# 362 "./include/net/dst.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
struct net *net)
{

dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
__skb_tunnel_rx(skb, dev, net);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 dst_tclassid(const struct sk_buff *skb)
{







return 0;
}

int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dst_discard(struct sk_buff *skb)
{
return dst_discard_out(&init_net, skb->sk, skb);
}
void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
int initial_obsolete, unsigned short flags);
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
struct net_device *dev, int initial_ref, int initial_obsolete,
unsigned short flags);
struct dst_entry *dst_destroy(struct dst_entry *dst);
void dst_dev_put(struct dst_entry *dst);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dst_confirm(struct dst_entry *dst)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
struct neighbour *n = dst->ops->neigh_lookup(dst, ((void *)0), daddr);
return IS_ERR(n) ? ((void *)0) : n;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
struct sk_buff *skb)
{
struct neighbour *n;

if (({ int __ret_warn_on = !!(!dst->ops->neigh_lookup); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/dst.h"), "i" (411), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }))
return ((void *)0);

n = dst->ops->neigh_lookup(dst, skb, ((void *)0));

return IS_ERR(n) ? ((void *)0) : n;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dst_confirm_neigh(const struct dst_entry *dst,
const void *daddr)
{
if (dst->ops->confirm_neigh)
dst->ops->confirm_neigh(dst, daddr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dst_link_failure(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
if (dst && dst->ops && dst->ops->link_failure)
dst->ops->link_failure(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dst_set_expires(struct dst_entry *dst, int timeout)
{
unsigned long expires = jiffies + timeout;

if (expires == 0)
expires = 1;

if (dst->expires == 0 || (({ unsigned long __dummy; typeof(dst->expires) __dummy2; (void)(&__dummy == &__dummy2); 1; }) && ({ unsigned long __dummy; typeof(expires) __dummy2; (void)(&__dummy == &__dummy2); 1; }) && ((long)((expires) - (dst->expires)) < 0)))
dst->expires = expires;
}


;

;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
return skb_dst(skb)->output(net, sk, skb);


}

;
;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int dst_input(struct sk_buff *skb)
{
return skb_dst(skb)->input(skb);

}


;

;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
if (dst->obsolete)
dst = dst->ops->check(dst, cookie);

return dst;
}


enum {
XFRM_LOOKUP_ICMP = 1 << 0,
XFRM_LOOKUP_QUEUE = 1 << 1,
XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
};
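/*
 * With IPsec (XFRM) support compiled out, the xfrm_lookup*() helpers below
 * collapse to pass-through stubs returning dst_orig unchanged, and dst_xfrm()
 * always returns NULL.
 */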

struct flowi;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct dst_entry *xfrm_lookup(struct net *net,
struct dst_entry *dst_orig,
const struct flowi *fl,
const struct sock *sk,
int flags)
{
return dst_orig;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct dst_entry *
xfrm_lookup_with_ifid(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl, const struct sock *sk,
int flags, u32 if_id)
{
return dst_orig;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct dst_entry *xfrm_lookup_route(struct net *net,
struct dst_entry *dst_orig,
const struct flowi *fl,
const struct sock *sk,
int flags)
{
return dst_orig;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
return ((void *)0);
}
# 539 "./include/net/dst.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
{
struct dst_entry *dst = skb_dst(skb);

if (dst && dst->ops->update_pmtu)
dst->ops->update_pmtu(dst, ((void *)0), skb, mtu, true);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
{
struct dst_entry *dst = skb_dst(skb);

if (dst && dst->ops->update_pmtu)
dst->ops->update_pmtu(dst, ((void *)0), skb, mtu, false);
}

struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie);
void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu, bool confirm_neigh);
void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old);
struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
struct sk_buff *skb,
const void *daddr);
unsigned int dst_blackhole_mtu(const struct dst_entry *dst);
# 67 "./include/net/sock.h" 2

# 1 "./include/net/tcp_states.h" 1
# 12 "./include/net/tcp_states.h"
enum {
TCP_ESTABLISHED = 1,
TCP_SYN_SENT,
TCP_SYN_RECV,
TCP_FIN_WAIT1,
TCP_FIN_WAIT2,
TCP_TIME_WAIT,
TCP_CLOSE,
TCP_CLOSE_WAIT,
TCP_LAST_ACK,
TCP_LISTEN,
TCP_CLOSING,
TCP_NEW_SYN_RECV,

TCP_MAX_STATES
};





enum {
TCPF_ESTABLISHED = (1 << TCP_ESTABLISHED),
TCPF_SYN_SENT = (1 << TCP_SYN_SENT),
TCPF_SYN_RECV = (1 << TCP_SYN_RECV),
TCPF_FIN_WAIT1 = (1 << TCP_FIN_WAIT1),
TCPF_FIN_WAIT2 = (1 << TCP_FIN_WAIT2),
TCPF_TIME_WAIT = (1 << TCP_TIME_WAIT),
TCPF_CLOSE = (1 << TCP_CLOSE),
TCPF_CLOSE_WAIT = (1 << TCP_CLOSE_WAIT),
TCPF_LAST_ACK = (1 << TCP_LAST_ACK),
TCPF_LISTEN = (1 << TCP_LISTEN),
TCPF_CLOSING = (1 << TCP_CLOSING),
TCPF_NEW_SYN_RECV = (1 << TCP_NEW_SYN_RECV),
};
# 69 "./include/net/sock.h" 2
# 1 "./include/uapi/linux/net_tstamp.h" 1
# 17 "./include/uapi/linux/net_tstamp.h"
enum {
SOF_TIMESTAMPING_TX_HARDWARE = (1<<0),
SOF_TIMESTAMPING_TX_SOFTWARE = (1<<1),
SOF_TIMESTAMPING_RX_HARDWARE = (1<<2),
SOF_TIMESTAMPING_RX_SOFTWARE = (1<<3),
SOF_TIMESTAMPING_SOFTWARE = (1<<4),
SOF_TIMESTAMPING_SYS_HARDWARE = (1<<5),
SOF_TIMESTAMPING_RAW_HARDWARE = (1<<6),
SOF_TIMESTAMPING_OPT_ID = (1<<7),
SOF_TIMESTAMPING_TX_SCHED = (1<<8),
SOF_TIMESTAMPING_TX_ACK = (1<<9),
SOF_TIMESTAMPING_OPT_CMSG = (1<<10),
SOF_TIMESTAMPING_OPT_TSONLY = (1<<11),
SOF_TIMESTAMPING_OPT_STATS = (1<<12),
SOF_TIMESTAMPING_OPT_PKTINFO = (1<<13),
SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14),
SOF_TIMESTAMPING_BIND_PHC = (1 << 15),

SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_BIND_PHC,
SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) |
SOF_TIMESTAMPING_LAST
};
# 57 "./include/uapi/linux/net_tstamp.h"
struct so_timestamping {
int flags;
int bind_phc;
};
# 75 "./include/uapi/linux/net_tstamp.h"
struct hwtstamp_config {
int flags;
int tx_type;
int rx_filter;
};


enum hwtstamp_flags {






HWTSTAMP_FLAG_BONDED_PHC_INDEX = (1<<0),


HWTSTAMP_FLAG_LAST = HWTSTAMP_FLAG_BONDED_PHC_INDEX,
HWTSTAMP_FLAG_MASK = (HWTSTAMP_FLAG_LAST - 1) | HWTSTAMP_FLAG_LAST
};


enum hwtstamp_tx_types {





HWTSTAMP_TX_OFF,







HWTSTAMP_TX_ON,
# 120 "./include/uapi/linux/net_tstamp.h"
HWTSTAMP_TX_ONESTEP_SYNC,







HWTSTAMP_TX_ONESTEP_P2P,


__HWTSTAMP_TX_CNT
};


enum hwtstamp_rx_filters {

HWTSTAMP_FILTER_NONE,


HWTSTAMP_FILTER_ALL,


HWTSTAMP_FILTER_SOME,


HWTSTAMP_FILTER_PTP_V1_L4_EVENT,

HWTSTAMP_FILTER_PTP_V1_L4_SYNC,

HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ,

HWTSTAMP_FILTER_PTP_V2_L4_EVENT,

HWTSTAMP_FILTER_PTP_V2_L4_SYNC,

HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ,


HWTSTAMP_FILTER_PTP_V2_L2_EVENT,

HWTSTAMP_FILTER_PTP_V2_L2_SYNC,

HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ,


HWTSTAMP_FILTER_PTP_V2_EVENT,

HWTSTAMP_FILTER_PTP_V2_SYNC,

HWTSTAMP_FILTER_PTP_V2_DELAY_REQ,


HWTSTAMP_FILTER_NTP_ALL,


__HWTSTAMP_FILTER_CNT
};


struct scm_ts_pktinfo {
__u32 if_index;
__u32 pkt_length;
__u32 reserved[2];
};





enum txtime_flags {
SOF_TXTIME_DEADLINE_MODE = (1 << 0),
SOF_TXTIME_REPORT_ERRORS = (1 << 1),

SOF_TXTIME_FLAGS_LAST = SOF_TXTIME_REPORT_ERRORS,
SOF_TXTIME_FLAGS_MASK = (SOF_TXTIME_FLAGS_LAST - 1) |
SOF_TXTIME_FLAGS_LAST
};

struct sock_txtime {
__kernel_clockid_t clockid;
__u32 flags;
};
# 70 "./include/net/sock.h" 2
# 1 "./include/net/l3mdev.h" 1
# 11 "./include/net/l3mdev.h"
# 1 "./include/net/fib_rules.h" 1







# 1 "./include/uapi/linux/fib_rules.h" 1
# 19 "./include/uapi/linux/fib_rules.h"
struct fib_rule_hdr {
__u8 family;
__u8 dst_len;
__u8 src_len;
__u8 tos;

__u8 table;
__u8 res1;
__u8 res2;
__u8 action;

__u32 flags;
};

struct fib_rule_uid_range {
__u32 start;
__u32 end;
};

struct fib_rule_port_range {
__u16 start;
__u16 end;
};

enum {
FRA_UNSPEC,
FRA_DST,
FRA_SRC,
FRA_IIFNAME,

FRA_GOTO,
FRA_UNUSED2,
FRA_PRIORITY,
FRA_UNUSED3,
FRA_UNUSED4,
FRA_UNUSED5,
FRA_FWMARK,
FRA_FLOW,
FRA_TUN_ID,
FRA_SUPPRESS_IFGROUP,
FRA_SUPPRESS_PREFIXLEN,
FRA_TABLE,
FRA_FWMASK,
FRA_OIFNAME,
FRA_PAD,
FRA_L3MDEV,
FRA_UID_RANGE,
FRA_PROTOCOL,
FRA_IP_PROTO,
FRA_SPORT_RANGE,
FRA_DPORT_RANGE,
__FRA_MAX
};



enum {
FR_ACT_UNSPEC,
FR_ACT_TO_TBL,
FR_ACT_GOTO,
FR_ACT_NOP,
FR_ACT_RES3,
FR_ACT_RES4,
FR_ACT_BLACKHOLE,
FR_ACT_UNREACHABLE,
FR_ACT_PROHIBIT,
__FR_ACT_MAX,
};
# 9 "./include/net/fib_rules.h" 2



# 1 "./include/net/fib_notifier.h" 1







struct module;

struct fib_notifier_info {
int family;
struct netlink_ext_ack *extack;
};

enum fib_event_type {
FIB_EVENT_ENTRY_REPLACE,
FIB_EVENT_ENTRY_APPEND,
FIB_EVENT_ENTRY_ADD,
FIB_EVENT_ENTRY_DEL,
FIB_EVENT_RULE_ADD,
FIB_EVENT_RULE_DEL,
FIB_EVENT_NH_ADD,
FIB_EVENT_NH_DEL,
FIB_EVENT_VIF_ADD,
FIB_EVENT_VIF_DEL,
};

struct fib_notifier_ops {
int family;
struct list_head list;
unsigned int (*fib_seq_read)(struct net *net);
int (*fib_dump)(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);
struct module *owner;
struct callback_head rcu;
};

int call_fib_notifier(struct notifier_block *nb,
enum fib_event_type event_type,
struct fib_notifier_info *info);
int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
struct fib_notifier_info *info);
int register_fib_notifier(struct net *net, struct notifier_block *nb,
void (*cb)(struct notifier_block *nb),
struct netlink_ext_ack *extack);
int unregister_fib_notifier(struct net *net, struct notifier_block *nb);
struct fib_notifier_ops *
fib_notifier_ops_register(const struct fib_notifier_ops *tmpl, struct net *net);
void fib_notifier_ops_unregister(struct fib_notifier_ops *ops);
# 13 "./include/net/fib_rules.h" 2


struct fib_kuid_range {
kuid_t start;
kuid_t end;
};

struct fib_rule {
struct list_head list;
int iifindex;
int oifindex;
u32 mark;
u32 mark_mask;
u32 flags;
u32 table;
u8 action;
u8 l3mdev;
u8 proto;
u8 ip_proto;
u32 target;
__be64 tun_id;
struct fib_rule *ctarget;
struct net *fr_net;

refcount_t refcnt;
u32 pref;
int suppress_ifgroup;
int suppress_prefixlen;
char iifname[16];
char oifname[16];
struct fib_kuid_range uid_range;
struct fib_rule_port_range sport_range;
struct fib_rule_port_range dport_range;
struct callback_head rcu;
};

struct fib_lookup_arg {
void *lookup_ptr;
const void *lookup_data;
void *result;
struct fib_rule *rule;
u32 table;
int flags;


};

struct fib_rules_ops {
int family;
struct list_head list;
int rule_size;
int addr_size;
int unresolved_rules;
int nr_goto_rules;
unsigned int fib_rules_seq;

int (*action)(struct fib_rule *,
struct flowi *, int,
struct fib_lookup_arg *);
bool (*suppress)(struct fib_rule *, int,
struct fib_lookup_arg *);
int (*match)(struct fib_rule *,
struct flowi *, int);
int (*configure)(struct fib_rule *,
struct sk_buff *,
struct fib_rule_hdr *,
struct nlattr **,
struct netlink_ext_ack *);
int (*delete)(struct fib_rule *);
int (*compare)(struct fib_rule *,
struct fib_rule_hdr *,
struct nlattr **);
int (*fill)(struct fib_rule *, struct sk_buff *,
struct fib_rule_hdr *);
size_t (*nlmsg_payload)(struct fib_rule *);



void (*flush_cache)(struct fib_rules_ops *ops);

int nlgroup;
struct list_head rules_list;
struct module *owner;
struct net *fro_net;
struct callback_head rcu;
};

struct fib_rule_notifier_info {
struct fib_notifier_info info;
struct fib_rule *rule;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fib_rule_get(struct fib_rule *rule)
{
refcount_inc(&rule->refcnt);
}
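/*
 * fib_rule_put() below contains the expansion of kfree_rcu(rule, rcu): a
 * compile-time check that the callback_head offset is small enough, then
 * kvfree_call_rcu() with that offset encoded as the callback.
 */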

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fib_rule_put(struct fib_rule *rule)
{
if (refcount_dec_and_test(&rule->refcnt))
do { typeof (rule) ___p = (rule); if (___p) { do { __attribute__((__noreturn__)) extern void __compiletime_assert_366(void) ; if (!(!(!((__builtin_offsetof(typeof(*(rule)), rcu)) < 4096)))) __compiletime_assert_366(); } while (0); kvfree_call_rcu(&((___p)->rcu), (rcu_callback_t)(unsigned long) (__builtin_offsetof(typeof(*(rule)), rcu))); } } while (0);
}
# 123 "./include/net/fib_rules.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 fib_rule_get_table(struct fib_rule *rule,
struct fib_lookup_arg *arg)
{
return rule->table;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla)
{
if (nla[FRA_TABLE])
return nla_get_u32(nla[FRA_TABLE]);
return frh->table;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fib_rule_port_range_set(const struct fib_rule_port_range *range)
{
return range->start != 0 && range->end != 0;
}
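/*
 * The __builtin_constant_p()/__fswab16() expression in
 * fib_rule_port_inrange() is ntohs(port) after expansion: constant byte swaps
 * fold at compile time, runtime values go through __fswab16().
 */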

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fib_rule_port_inrange(const struct fib_rule_port_range *a,
__be16 port)
{
return (__builtin_constant_p((__u16)(( __u16)(__be16)(port))) ? ((__u16)( (((__u16)(( __u16)(__be16)(port)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(port)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(port))) >= a->start &&
(__builtin_constant_p((__u16)(( __u16)(__be16)(port))) ? ((__u16)( (((__u16)(( __u16)(__be16)(port)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(port)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(port))) <= a->end;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fib_rule_port_range_valid(const struct fib_rule_port_range *a)
{
return a->start != 0 && a->end != 0 && a->end < 0xffff &&
a->start <= a->end;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fib_rule_port_range_compare(struct fib_rule_port_range *a,
struct fib_rule_port_range *b)
{
return a->start == b->start &&
a->end == b->end;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fib_rule_requires_fldissect(struct fib_rule *rule)
{
return rule->iifindex != 1 && (rule->ip_proto ||
fib_rule_port_range_set(&rule->sport_range) ||
fib_rule_port_range_set(&rule->dport_range));
}

struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *,
struct net *);
void fib_rules_unregister(struct fib_rules_ops *);

int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
struct fib_lookup_arg *);
int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
u32 flags);
bool fib_rule_matchall(const struct fib_rule *rule);
int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
struct netlink_ext_ack *extack);
unsigned int fib_rules_seq_read(struct net *net, int family);

int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack);
int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack);


;

;



;


;



;


;
# 12 "./include/net/l3mdev.h" 2

enum l3mdev_type {
L3MDEV_TYPE_UNSPEC,
L3MDEV_TYPE_VRF,
__L3MDEV_TYPE_MAX
};



typedef int (*lookup_by_table_id_t)(struct net *net, u32 table_id);
# 35 "./include/net/l3mdev.h"
struct l3mdev_ops {
u32 (*l3mdev_fib_table)(const struct net_device *dev);
struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *dev,
struct sk_buff *skb, u16 proto);
struct sk_buff * (*l3mdev_l3_out)(struct net_device *dev,
struct sock *sk, struct sk_buff *skb,
u16 proto);


struct dst_entry * (*l3mdev_link_scope_lookup)(const struct net_device *dev,
struct flowi6 *fl6);
};
# 223 "./include/net/l3mdev.h"
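/*
 * The jump in the line markers (35 -> 223) indicates the preprocessor skipped
 * the CONFIG_NET_L3_MASTER_DEV=y implementations; the stubs below return
 * 0/NULL or pass the skb through untouched.
 */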
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int l3mdev_master_ifindex_rcu(const struct net_device *dev)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int l3mdev_master_ifindex(struct net_device *dev)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
int l3mdev_master_upper_ifindex_by_index(struct net *net, int ifindex)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct net_device *l3mdev_master_dev_rcu(const struct net_device *dev)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 l3mdev_fib_table_rcu(const struct net_device *dev)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 l3mdev_fib_table(const struct net_device *dev)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool netif_index_is_l3_master(struct net *net, int ifindex)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
{
return skb;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
{
return skb;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct sk_buff *l3mdev_ip_out(struct sock *sk, struct sk_buff *skb)
{
return skb;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
{
return skb;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
int l3mdev_table_lookup_register(enum l3mdev_type l3type,
lookup_by_table_id_t fn)
{
return -95;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
void l3mdev_table_lookup_unregister(enum l3mdev_type l3type,
lookup_by_table_id_t fn)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, struct net *net,
u32 table_id)
{
return -19;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
struct fib_lookup_arg *arg)
{
return 1;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
void l3mdev_update_flow(struct net *net, struct flowi *fl)
{
}
# 71 "./include/net/sock.h" 2
# 96 "./include/net/sock.h"
typedef struct {
spinlock_t slock;
int owned;
wait_queue_head_t wq;







struct lockdep_map dep_map;

} socket_lock_t;

struct sock;
struct proto;
struct net;

typedef __u32 __portpair;
typedef __u64 __addrpair;
# 163 "./include/net/sock.h"
struct sock_common {



union {
__addrpair skc_addrpair;
struct {
__be32 skc_daddr;
__be32 skc_rcv_saddr;
};
};
union {
unsigned int skc_hash;
__u16 skc_u16hashes[2];
};

union {
__portpair skc_portpair;
struct {
__be16 skc_dport;
__u16 skc_num;
};
};

unsigned short skc_family;
volatile unsigned char skc_state;
unsigned char skc_reuse:4;
unsigned char skc_reuseport:1;
unsigned char skc_ipv6only:1;
unsigned char skc_net_refcnt:1;
int skc_bound_dev_if;
union {
struct hlist_node skc_bind_node;
struct hlist_node skc_portaddr_node;
};
struct proto *skc_prot;
possible_net_t skc_net;


struct in6_addr skc_v6_daddr;
struct in6_addr skc_v6_rcv_saddr;


atomic64_t skc_cookie;






union {
unsigned long skc_flags;
struct sock *skc_listener;
struct inet_timewait_death_row *skc_tw_dr;
};





int skc_dontcopy_begin[0];

union {
struct hlist_node skc_node;
struct hlist_nulls_node skc_nulls_node;
};
unsigned short skc_tx_queue_mapping;

unsigned short skc_rx_queue_mapping;

union {
int skc_incoming_cpu;
u32 skc_rcv_wnd;
u32 skc_tw_rcv_nxt;
};

refcount_t skc_refcnt;

int skc_dontcopy_end[0];
union {
u32 skc_rxhash;
u32 skc_window_clamp;
u32 skc_tw_snd_nxt;
};

};

struct bpf_local_storage;
struct sk_filter;
# 356 "./include/net/sock.h"
struct sock {




struct sock_common __sk_common;
# 397 "./include/net/sock.h"
struct dst_entry *sk_rx_dst;
int sk_rx_dst_ifindex;
u32 sk_rx_dst_cookie;

socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
struct sk_buff_head sk_receive_queue;
# 414 "./include/net/sock.h"
struct {
atomic_t rmem_alloc;
int len;
struct sk_buff *head;
struct sk_buff *tail;
} sk_backlog;
struct llist_head defer_list;



int sk_forward_alloc;
u32 sk_reserved_mem;

unsigned int sk_ll_usec;

unsigned int sk_napi_id;

int sk_rcvbuf;

struct sk_filter *sk_filter;
union {
struct socket_wq *sk_wq;

struct socket_wq *sk_wq_raw;

};




struct dst_entry *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;


int sk_wmem_queued;
refcount_t sk_wmem_alloc;
unsigned long sk_tsq_flags;
union {
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
};
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
u32 sk_pacing_status;
long sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
unsigned long sk_pacing_rate;
unsigned long sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
__u32 sk_txhash;





u8 sk_gso_disabled : 1,
sk_kern_sock : 1,
sk_no_check_tx : 1,
sk_no_check_rx : 1,
sk_userlocks : 4;
u8 sk_pacing_shift;
u16 sk_type;
u16 sk_protocol;
u16 sk_gso_max_segs;
unsigned long sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err,
sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
u8 sk_txrehash;

u8 sk_prefer_busy_poll;
u16 sk_busy_poll_budget;

spinlock_t sk_peer_lock;
int sk_bind_phc;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;

long sk_rcvtimeo;
ktime_t sk_stamp;



u16 sk_tsflags;
u8 sk_shutdown;
atomic_t sk_tskey;
atomic_t sk_zckey;

u8 sk_clockid;
u8 sk_txtime_deadline_mode : 1,
sk_txtime_report_errors : 1,
sk_txtime_unused : 6;

struct socket *sk_socket;
void *sk_user_data;



struct sock_cgroup_data sk_cgrp_data;
struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *sk);
void (*sk_data_ready)(struct sock *sk);
void (*sk_write_space)(struct sock *sk);
void (*sk_error_report)(struct sock *sk);
int (*sk_backlog_rcv)(struct sock *sk,
struct sk_buff *skb);





void (*sk_destruct)(struct sock *sk);
struct sock_reuseport *sk_reuseport_cb;

struct bpf_local_storage *sk_bpf_storage;

struct callback_head sk_rcu;
netns_tracker ns_tracker;
};

enum sk_pacing {
SK_PACING_NONE = 0,
SK_PACING_NEEDED = 1,
SK_PACING_FQ = 2,
};
# 566 "./include/net/sock.h"
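/*
 * sk->sk_user_data is another tagged pointer: bit 0 corresponds to
 * SK_USER_DATA_NOCOPY in the unpreprocessed header, tested here via
 * `(uintptr_t)sk->sk_user_data & 1UL`.
 */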
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_user_data_is_nocopy(const struct sock *sk)
{
return ((uintptr_t)sk->sk_user_data & 1UL);
}
# 592 "./include/net/sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct net *sock_net(const struct sock *sk)
{
return read_pnet(&sk->__sk_common.skc_net);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
void sock_net_set(struct sock *sk, struct net *net)
{
write_pnet(&sk->__sk_common.skc_net, net);
}
# 615 "./include/net/sock.h"
int sk_set_peek_off(struct sock *sk, int val);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sk_peek_offset(struct sock *sk, int flags)
{
if (__builtin_expect(!!(flags & 2), 0)) {
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_367(void) ; if (!((sizeof(sk->sk_peek_off) == sizeof(char) || sizeof(sk->sk_peek_off) == sizeof(short) || sizeof(sk->sk_peek_off) == sizeof(int) || sizeof(sk->sk_peek_off) == sizeof(long)) || sizeof(sk->sk_peek_off) == sizeof(long long))) __compiletime_assert_367(); } while (0); (*(const volatile typeof( _Generic((sk->sk_peek_off), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_peek_off))) *)&(sk->sk_peek_off)); });
}

return 0;
}
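/*
 * The __builtin_choose_expr()/__UNIQUE_ID block in sk_peek_offset_bwd() is
 * the kernel's max() macro after expansion: a type-checked, single-evaluation
 * maximum of (s32)(off - val) and 0.
 */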

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_peek_offset_bwd(struct sock *sk, int val)
{
s32 off = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_368(void) ; if (!((sizeof(sk->sk_peek_off) == sizeof(char) || sizeof(sk->sk_peek_off) == sizeof(short) || sizeof(sk->sk_peek_off) == sizeof(int) || sizeof(sk->sk_peek_off) == sizeof(long)) || sizeof(sk->sk_peek_off) == sizeof(long long))) __compiletime_assert_368(); } while (0); (*(const volatile typeof( _Generic((sk->sk_peek_off), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_peek_off))) *)&(sk->sk_peek_off)); });

if (__builtin_expect(!!(off >= 0), 0)) {
off = __builtin_choose_expr(((!!(sizeof((typeof((s32)(off - val)) *)1 == (typeof((s32)(0)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((s32)(off - val)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((s32)(0)) * 0l)) : (int *)8))))), (((s32)(off - val)) > ((s32)(0)) ? ((s32)(off - val)) : ((s32)(0))), ({ typeof((s32)(off - val)) __UNIQUE_ID___x369 = ((s32)(off - val)); typeof((s32)(0)) __UNIQUE_ID___y370 = ((s32)(0)); ((__UNIQUE_ID___x369) > (__UNIQUE_ID___y370) ? (__UNIQUE_ID___x369) : (__UNIQUE_ID___y370)); }));
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_371(void) ; if (!((sizeof(sk->sk_peek_off) == sizeof(char) || sizeof(sk->sk_peek_off) == sizeof(short) || sizeof(sk->sk_peek_off) == sizeof(int) || sizeof(sk->sk_peek_off) == sizeof(long)) || sizeof(sk->sk_peek_off) == sizeof(long long))) __compiletime_assert_371(); } while (0); do { *(volatile typeof(sk->sk_peek_off) *)&(sk->sk_peek_off) = (off); } while (0); } while (0);
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_peek_offset_fwd(struct sock *sk, int val)
{
sk_peek_offset_bwd(sk, -val);
}
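/*
 * The ({ void *__mptr ... }) blocks in the sk_entry()/sk_head() family below
 * are container_of() expansions, mapping an embedded hlist node back to its
 * enclosing struct sock.
 */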




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sock *sk_entry(const struct hlist_node *node)
{
return ({ void *__mptr = (void *)(node); _Static_assert(__builtin_types_compatible_p(typeof(*(node)), typeof(((struct sock *)0)->__sk_common.skc_node)) || __builtin_types_compatible_p(typeof(*(node)), typeof(void)), "pointer type mismatch in container_of()"); ((struct sock *)(__mptr - __builtin_offsetof(struct sock, __sk_common.skc_node))); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sock *__sk_head(const struct hlist_head *head)
{
return ({ void *__mptr = (void *)(head->first); _Static_assert(__builtin_types_compatible_p(typeof(*(head->first)), typeof(((struct sock *)0)->__sk_common.skc_node)) || __builtin_types_compatible_p(typeof(*(head->first)), typeof(void)), "pointer type mismatch in container_of()"); ((struct sock *)(__mptr - __builtin_offsetof(struct sock, __sk_common.skc_node))); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sock *sk_head(const struct hlist_head *head)
{
return hlist_empty(head) ? ((void *)0) : __sk_head(head);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
return ({ void *__mptr = (void *)(head->first); _Static_assert(__builtin_types_compatible_p(typeof(*(head->first)), typeof(((struct sock *)0)->__sk_common.skc_nulls_node)) || __builtin_types_compatible_p(typeof(*(head->first)), typeof(void)), "pointer type mismatch in container_of()"); ((struct sock *)(__mptr - __builtin_offsetof(struct sock, __sk_common.skc_nulls_node))); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
return hlist_nulls_empty(head) ? ((void *)0) : __sk_nulls_head(head);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sock *sk_next(const struct sock *sk)
{
return ({ typeof(sk->__sk_common.skc_node.next) ____ptr = (sk->__sk_common.skc_node.next); ____ptr ? ({ void *__mptr = (void *)(____ptr); _Static_assert(__builtin_types_compatible_p(typeof(*(____ptr)), typeof(((struct sock *)0)->__sk_common.skc_node)) || __builtin_types_compatible_p(typeof(*(____ptr)), typeof(void)), "pointer type mismatch in container_of()"); ((struct sock *)(__mptr - __builtin_offsetof(struct sock, __sk_common.skc_node))); }) : ((void *)0); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sock *sk_nulls_next(const struct sock *sk)
{
return (!is_a_nulls(sk->__sk_common.skc_nulls_node.next)) ?
({ void *__mptr = (void *)(sk->__sk_common.skc_nulls_node.next); _Static_assert(__builtin_types_compatible_p(typeof(*(sk->__sk_common.skc_nulls_node.next)), typeof(((struct sock *)0)->__sk_common.skc_nulls_node)) || __builtin_types_compatible_p(typeof(*(sk->__sk_common.skc_nulls_node.next)), typeof(void)), "pointer type mismatch in container_of()"); ((struct sock *)(__mptr - __builtin_offsetof(struct sock, __sk_common.skc_nulls_node))); }) :

((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_unhashed(const struct sock *sk)
{
return hlist_unhashed(&sk->__sk_common.skc_node);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_hashed(const struct sock *sk)
{
return !sk_unhashed(sk);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_node_init(struct hlist_node *node)
{
node->pprev = ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_nulls_node_init(struct hlist_nulls_node *node)
{
node->pprev = ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __sk_del_node(struct sock *sk)
{
__hlist_del(&sk->__sk_common.skc_node);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __sk_del_node_init(struct sock *sk)
{
if (sk_hashed(sk)) {
__sk_del_node(sk);
sk_node_init(&sk->__sk_common.skc_node);
return true;
}
return false;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void sock_hold(struct sock *sk)
{
refcount_inc(&sk->__sk_common.skc_refcnt);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __sock_put(struct sock *sk)
{
refcount_dec(&sk->__sk_common.skc_refcnt);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_del_node_init(struct sock *sk)
{
bool rc = __sk_del_node_init(sk);

if (rc) {

({ int __ret_warn_on = !!(refcount_read(&sk->__sk_common.skc_refcnt) == 1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/sock.h"), "i" (743), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
__sock_put(sk);
}
return rc;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __sk_nulls_del_node_init_rcu(struct sock *sk)
{
if (sk_hashed(sk)) {
hlist_nulls_del_init_rcu(&sk->__sk_common.skc_nulls_node);
return true;
}
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_nulls_del_node_init_rcu(struct sock *sk)
{
bool rc = __sk_nulls_del_node_init_rcu(sk);

if (rc) {

({ int __ret_warn_on = !!(refcount_read(&sk->__sk_common.skc_refcnt) == 1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/sock.h"), "i" (765), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
__sock_put(sk);
}
return rc;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
hlist_add_head(&sk->__sk_common.skc_node, list);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_add_node(struct sock *sk, struct hlist_head *list)
{
sock_hold(sk);
__sk_add_node(sk, list);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
sock_hold(sk);
if (1 && sk->__sk_common.skc_reuseport &&
sk->__sk_common.skc_family == 10)
hlist_add_tail_rcu(&sk->__sk_common.skc_node, list);
else
hlist_add_head_rcu(&sk->__sk_common.skc_node, list);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
{
sock_hold(sk);
hlist_add_tail_rcu(&sk->__sk_common.skc_node, list);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
hlist_nulls_add_head_rcu(&sk->__sk_common.skc_nulls_node, list);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
hlist_nulls_add_tail_rcu(&sk->__sk_common.skc_nulls_node, list);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
sock_hold(sk);
__sk_nulls_add_node_rcu(sk, list);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __sk_del_bind_node(struct sock *sk)
{
__hlist_del(&sk->__sk_common.skc_bind_node);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_add_bind_node(struct sock *sk,
struct hlist_head *list)
{
hlist_add_head(&sk->__sk_common.skc_bind_node, list);
}
# 857 "./include/net/sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct user_namespace *sk_user_ns(struct sock *sk)
{




return sk->sk_socket->file->f_cred->user_ns;
}


enum sock_flags {
SOCK_DEAD,
SOCK_DONE,
SOCK_URGINLINE,
SOCK_KEEPOPEN,
SOCK_LINGER,
SOCK_DESTROY,
SOCK_BROADCAST,
SOCK_TIMESTAMP,
SOCK_ZAPPED,
SOCK_USE_WRITE_QUEUE,
SOCK_DBG,
SOCK_RCVTSTAMP,
SOCK_RCVTSTAMPNS,
SOCK_LOCALROUTE,
SOCK_MEMALLOC,
SOCK_TIMESTAMPING_RX_SOFTWARE,
SOCK_FASYNC,
SOCK_RXQ_OVFL,
SOCK_ZEROCOPY,
SOCK_WIFI_STATUS,
SOCK_NOFCS,



SOCK_FILTER_LOCKED,
SOCK_SELECT_ERR_QUEUE,
SOCK_RCU_FREE,
SOCK_TXTIME,
SOCK_XDP,
SOCK_TSTAMP_NEW,
};



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
nsk->__sk_common.skc_flags = osk->__sk_common.skc_flags;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
arch___set_bit(flag, &sk->__sk_common.skc_flags);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
arch___clear_bit(flag, &sk->__sk_common.skc_flags);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_valbool_flag(struct sock *sk, enum sock_flags bit,
int valbool)
{
if (valbool)
sock_set_flag(sk, bit);
else
sock_reset_flag(sk, bit);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sock_flag(const struct sock *sk, enum sock_flags flag)
{
return arch_test_bit(flag, &sk->__sk_common.skc_flags);
}


extern struct static_key_false memalloc_socks_key;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sk_memalloc_socks(void)
{
return __builtin_expect(!!(({ if (!__builtin_types_compatible_p(typeof(*&(&memalloc_socks_key)->key), struct static_key) && !__builtin_types_compatible_p(typeof(*&(&memalloc_socks_key)->key), struct static_key_true) && !__builtin_types_compatible_p(typeof(*&(&memalloc_socks_key)->key), struct static_key_false)) ____wrong_branch_error(); static_key_count((struct static_key *)&(&memalloc_socks_key)->key) > 0; })), 0);
}
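/* NOTE: the static_key_count() comparison guarded by the
 * __builtin_types_compatible_p() checks appears to be the non-jump-label
 * fallback expansion of static_branch_unlikely(&memalloc_socks_key). */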

void __receive_sock(struct file *file);
# 950 "./include/net/sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
{
return gfp_mask | (sk->sk_allocation & (( gfp_t)0x20000u));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_acceptq_removed(struct sock *sk)
{
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_372(void) ; if (!((sizeof(sk->sk_ack_backlog) == sizeof(char) || sizeof(sk->sk_ack_backlog) == sizeof(short) || sizeof(sk->sk_ack_backlog) == sizeof(int) || sizeof(sk->sk_ack_backlog) == sizeof(long)) || sizeof(sk->sk_ack_backlog) == sizeof(long long))) __compiletime_assert_372(); } while (0); do { *(volatile typeof(sk->sk_ack_backlog) *)&(sk->sk_ack_backlog) = (sk->sk_ack_backlog - 1); } while (0); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_acceptq_added(struct sock *sk)
{
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_373(void) ; if (!((sizeof(sk->sk_ack_backlog) == sizeof(char) || sizeof(sk->sk_ack_backlog) == sizeof(short) || sizeof(sk->sk_ack_backlog) == sizeof(int) || sizeof(sk->sk_ack_backlog) == sizeof(long)) || sizeof(sk->sk_ack_backlog) == sizeof(long long))) __compiletime_assert_373(); } while (0); do { *(volatile typeof(sk->sk_ack_backlog) *)&(sk->sk_ack_backlog) = (sk->sk_ack_backlog + 1); } while (0); } while (0);
}
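/* NOTE: throughout this header, the do { ... __compiletime_assert_NNN() ... }
 * while (0) wrappers around volatile accesses appear to be the expansions of
 * WRITE_ONCE() (a plain volatile store) and READ_ONCE() (the _Generic()-based
 * volatile load); the assert only enforces that the access is word-sized. */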





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_acceptq_is_full(const struct sock *sk)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_374(void) ; if (!((sizeof(sk->sk_ack_backlog) == sizeof(char) || sizeof(sk->sk_ack_backlog) == sizeof(short) || sizeof(sk->sk_ack_backlog) == sizeof(int) || sizeof(sk->sk_ack_backlog) == sizeof(long)) || sizeof(sk->sk_ack_backlog) == sizeof(long long))) __compiletime_assert_374(); } while (0); (*(const volatile typeof( _Generic((sk->sk_ack_backlog), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_ack_backlog))) *)&(sk->sk_ack_backlog)); }) > ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_375(void) ; if (!((sizeof(sk->sk_max_ack_backlog) == sizeof(char) || sizeof(sk->sk_max_ack_backlog) == sizeof(short) || sizeof(sk->sk_max_ack_backlog) == sizeof(int) || sizeof(sk->sk_max_ack_backlog) == sizeof(long)) || sizeof(sk->sk_max_ack_backlog) == sizeof(long long))) __compiletime_assert_375(); } while (0); (*(const volatile typeof( _Generic((sk->sk_max_ack_backlog), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_max_ack_backlog))) *)&(sk->sk_max_ack_backlog)); });
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sk_stream_min_wspace(const struct sock *sk)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_376(void) ; if (!((sizeof(sk->sk_wmem_queued) == sizeof(char) || sizeof(sk->sk_wmem_queued) == sizeof(short) || sizeof(sk->sk_wmem_queued) == sizeof(int) || sizeof(sk->sk_wmem_queued) == sizeof(long)) || sizeof(sk->sk_wmem_queued) == sizeof(long long))) __compiletime_assert_376(); } while (0); (*(const volatile typeof( _Generic((sk->sk_wmem_queued), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_wmem_queued))) *)&(sk->sk_wmem_queued)); }) >> 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sk_stream_wspace(const struct sock *sk)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_377(void) ; if (!((sizeof(sk->sk_sndbuf) == sizeof(char) || sizeof(sk->sk_sndbuf) == sizeof(short) || sizeof(sk->sk_sndbuf) == sizeof(int) || sizeof(sk->sk_sndbuf) == sizeof(long)) || sizeof(sk->sk_sndbuf) == sizeof(long long))) __compiletime_assert_377(); } while (0); (*(const volatile typeof( _Generic((sk->sk_sndbuf), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_sndbuf))) *)&(sk->sk_sndbuf)); }) - ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_378(void) ; if (!((sizeof(sk->sk_wmem_queued) == sizeof(char) || sizeof(sk->sk_wmem_queued) == sizeof(short) || sizeof(sk->sk_wmem_queued) == sizeof(int) || sizeof(sk->sk_wmem_queued) == sizeof(long)) || sizeof(sk->sk_wmem_queued) == sizeof(long long))) __compiletime_assert_378(); } while (0); (*(const volatile typeof( _Generic((sk->sk_wmem_queued), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_wmem_queued))) *)&(sk->sk_wmem_queued)); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_wmem_queued_add(struct sock *sk, int val)
{
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_379(void) ; if (!((sizeof(sk->sk_wmem_queued) == sizeof(char) || sizeof(sk->sk_wmem_queued) == sizeof(short) || sizeof(sk->sk_wmem_queued) == sizeof(int) || sizeof(sk->sk_wmem_queued) == sizeof(long)) || sizeof(sk->sk_wmem_queued) == sizeof(long long))) __compiletime_assert_379(); } while (0); do { *(volatile typeof(sk->sk_wmem_queued) *)&(sk->sk_wmem_queued) = (sk->sk_wmem_queued + val); } while (0); } while (0);
}

void sk_stream_write_space(struct sock *sk);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{

skb_dst_force(skb);

if (!sk->sk_backlog.tail)
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_380(void) ; if (!((sizeof(sk->sk_backlog.head) == sizeof(char) || sizeof(sk->sk_backlog.head) == sizeof(short) || sizeof(sk->sk_backlog.head) == sizeof(int) || sizeof(sk->sk_backlog.head) == sizeof(long)) || sizeof(sk->sk_backlog.head) == sizeof(long long))) __compiletime_assert_380(); } while (0); do { *(volatile typeof(sk->sk_backlog.head) *)&(sk->sk_backlog.head) = (skb); } while (0); } while (0);
else
sk->sk_backlog.tail->next = skb;

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_381(void) ; if (!((sizeof(sk->sk_backlog.tail) == sizeof(char) || sizeof(sk->sk_backlog.tail) == sizeof(short) || sizeof(sk->sk_backlog.tail) == sizeof(int) || sizeof(sk->sk_backlog.tail) == sizeof(long)) || sizeof(sk->sk_backlog.tail) == sizeof(long long))) __compiletime_assert_381(); } while (0); do { *(volatile typeof(sk->sk_backlog.tail) *)&(sk->sk_backlog.tail) = (skb); } while (0); } while (0);
skb->next = ((void *)0);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
{
unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_backlog.rmem_alloc);

return qsize > limit;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__)) int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
unsigned int limit)
{
if (sk_rcvqueues_full(sk, limit))
return -105;






if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
return -12;

__sk_add_backlog(sk, skb);
sk->sk_backlog.len += skb->truesize;
return 0;
}

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
if (sk_memalloc_socks() && skb_pfmemalloc(skb))
return __sk_backlog_rcv(sk, skb);

return sk->sk_backlog_rcv(sk, skb);



}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_incoming_cpu_update(struct sock *sk)
{
int cpu = (((struct thread_info *)get_current())->cpu);

if (__builtin_expect(!!(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_382(void) ; if (!((sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(char) || sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(short) || sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(int) || sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(long)) || sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(long long))) __compiletime_assert_382(); } while (0); (*(const volatile typeof( _Generic((sk->__sk_common.skc_incoming_cpu), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->__sk_common.skc_incoming_cpu))) *)&(sk->__sk_common.skc_incoming_cpu)); }) != cpu), 0))
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_383(void) ; if (!((sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(char) || sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(short) || sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(int) || sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(long)) || sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(long long))) __compiletime_assert_383(); } while (0); do { *(volatile typeof(sk->__sk_common.skc_incoming_cpu) *)&(sk->__sk_common.skc_incoming_cpu) = (cpu); } while (0); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_rps_record_flow_hash(__u32 hash)
{

struct rps_sock_flow_table *sock_flow_table;

rcu_read_lock();
sock_flow_table = ({ typeof(*(rps_sock_flow_table)) *__UNIQUE_ID_rcu384 = (typeof(*(rps_sock_flow_table)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_385(void) ; if (!((sizeof((rps_sock_flow_table)) == sizeof(char) || sizeof((rps_sock_flow_table)) == sizeof(short) || sizeof((rps_sock_flow_table)) == sizeof(int) || sizeof((rps_sock_flow_table)) == sizeof(long)) || sizeof((rps_sock_flow_table)) == sizeof(long long))) __compiletime_assert_385(); } while (0); (*(const volatile typeof( _Generic(((rps_sock_flow_table)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((rps_sock_flow_table)))) *)&((rps_sock_flow_table))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(rps_sock_flow_table)) *)(__UNIQUE_ID_rcu384)); });
rps_record_sock_flow(sock_flow_table, hash);
rcu_read_unlock();

}
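/* NOTE: the ({ typeof(*(rps_sock_flow_table)) *__UNIQUE_ID_rcu... }) block
 * above appears to be the expansion of rcu_dereference(rps_sock_flow_table):
 * a READ_ONCE() of the pointer plus a lockdep check (compiled out in this
 * configuration) that rcu_read_lock() is held. */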

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_rps_record_flow(const struct sock *sk)
{

if (__builtin_expect(!!(({ if (!__builtin_types_compatible_p(typeof(*&(&rfs_needed)->key), struct static_key) && !__builtin_types_compatible_p(typeof(*&(&rfs_needed)->key), struct static_key_true) && !__builtin_types_compatible_p(typeof(*&(&rfs_needed)->key), struct static_key_false)) ____wrong_branch_error(); static_key_count((struct static_key *)&(&rfs_needed)->key) > 0; })), 0)) {
# 1091 "./include/net/sock.h"
if (sk->__sk_common.skc_state == TCP_ESTABLISHED)
sock_rps_record_flow_hash(sk->__sk_common.skc_rxhash);
}

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_rps_save_rxhash(struct sock *sk,
const struct sk_buff *skb)
{

if (__builtin_expect(!!(sk->__sk_common.skc_rxhash != skb->hash), 0))
sk->__sk_common.skc_rxhash = skb->hash;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_rps_reset_rxhash(struct sock *sk)
{

sk->__sk_common.skc_rxhash = 0;

}
# 1128 "./include/net/sock.h"
int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
void sk_stream_wait_close(struct sock *sk, long timeo_p);
int sk_stream_error(struct sock *sk, int flags, int err);
void sk_stream_kill_queues(struct sock *sk);
void sk_set_memalloc(struct sock *sk);
void sk_clear_memalloc(struct sock *sk);

void __sk_flush_backlog(struct sock *sk);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_flush_backlog(struct sock *sk)
{
if (__builtin_expect(!!(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_386(void) ; if (!((sizeof(sk->sk_backlog.tail) == sizeof(char) || sizeof(sk->sk_backlog.tail) == sizeof(short) || sizeof(sk->sk_backlog.tail) == sizeof(int) || sizeof(sk->sk_backlog.tail) == sizeof(long)) || sizeof(sk->sk_backlog.tail) == sizeof(long long))) __compiletime_assert_386(); } while (0); (*(const volatile typeof( _Generic((sk->sk_backlog.tail), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_backlog.tail))) *)&(sk->sk_backlog.tail)); })), 0)) {
__sk_flush_backlog(sk);
return true;
}
return false;
}

int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);

struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;
struct smc_hashinfo;
struct module;
struct sk_psock;





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_prot_clear_nulls(struct sock *sk, int size)
{
if (__builtin_offsetof(struct sock, __sk_common.skc_node.next) != 0)
memset(sk, 0, __builtin_offsetof(struct sock, __sk_common.skc_node.next));
memset(&sk->__sk_common.skc_node.pprev, 0,
size - __builtin_offsetof(struct sock, __sk_common.skc_node.pprev));
}
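/* NOTE: sk_prot_clear_nulls() zeroes the socket in two memset() calls that
 * skip skc_node.next, apparently so that a concurrent RCU reader walking an
 * hlist_nulls chain through a SLAB_TYPESAFE_BY_RCU slab never observes a
 * cleared ->next pointer. */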




struct proto {
void (*close)(struct sock *sk,
long timeout);
int (*pre_connect)(struct sock *sk,
struct sockaddr *uaddr,
int addr_len);
int (*connect)(struct sock *sk,
struct sockaddr *uaddr,
int addr_len);
int (*disconnect)(struct sock *sk, int flags);

struct sock * (*accept)(struct sock *sk, int flags, int *err,
bool kern);

int (*ioctl)(struct sock *sk, int cmd,
unsigned long arg);
int (*init)(struct sock *sk);
void (*destroy)(struct sock *sk);
void (*shutdown)(struct sock *sk, int how);
int (*setsockopt)(struct sock *sk, int level,
int optname, sockptr_t optval,
unsigned int optlen);
int (*getsockopt)(struct sock *sk, int level,
int optname, char *optval,
int *option);
void (*keepalive)(struct sock *sk, int valbool);




int (*sendmsg)(struct sock *sk, struct msghdr *msg,
size_t len);
int (*recvmsg)(struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags,
int *addr_len);
int (*sendpage)(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
int (*bind)(struct sock *sk,
struct sockaddr *addr, int addr_len);
int (*bind_add)(struct sock *sk,
struct sockaddr *addr, int addr_len);

int (*backlog_rcv) (struct sock *sk,
struct sk_buff *skb);
bool (*bpf_bypass_getsockopt)(int level,
int optname);

void (*release_cb)(struct sock *sk);


int (*hash)(struct sock *sk);
void (*unhash)(struct sock *sk);
void (*rehash)(struct sock *sk);
int (*get_port)(struct sock *sk, unsigned short snum);
void (*put_port)(struct sock *sk);

int (*psock_update_sk_prot)(struct sock *sk,
struct sk_psock *psock,
bool restore);




unsigned int inuse_idx;






bool (*stream_memory_free)(const struct sock *sk, int wake);
bool (*sock_is_readable)(struct sock *sk);

void (*enter_memory_pressure)(struct sock *sk);
void (*leave_memory_pressure)(struct sock *sk);
atomic_long_t *memory_allocated;
struct percpu_counter *sockets_allocated;







unsigned long *memory_pressure;
long *sysctl_mem;

int *sysctl_wmem;
int *sysctl_rmem;
u32 sysctl_wmem_offset;
u32 sysctl_rmem_offset;

int max_header;
bool no_autobind;

struct kmem_cache *slab;
unsigned int obj_size;
slab_flags_t slab_flags;
unsigned int useroffset;
unsigned int usersize;

unsigned int *orphan_count;

struct request_sock_ops *rsk_prot;
struct timewait_sock_ops *twsk_prot;

union {
struct inet_hashinfo *hashinfo;
struct udp_table *udp_table;
struct raw_hashinfo *raw_hash;
struct smc_hashinfo *smc_hash;
} h;

struct module *owner;

char name[32];

struct list_head node;



int (*diag_destroy)(struct sock *sk, int err);
};

int proto_register(struct proto *prot, int alloc_slab);
void proto_unregister(struct proto *prot);
int sock_load_diag_module(int family, int protocol);
# 1325 "./include/net/sock.h"

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sk_forward_alloc_get(const struct sock *sk)
{




return sk->sk_forward_alloc;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __sk_stream_memory_free(const struct sock *sk, int wake)
{
if (({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_387(void) ; if (!((sizeof(sk->sk_wmem_queued) == sizeof(char) || sizeof(sk->sk_wmem_queued) == sizeof(short) || sizeof(sk->sk_wmem_queued) == sizeof(int) || sizeof(sk->sk_wmem_queued) == sizeof(long)) || sizeof(sk->sk_wmem_queued) == sizeof(long long))) __compiletime_assert_387(); } while (0); (*(const volatile typeof( _Generic((sk->sk_wmem_queued), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_wmem_queued))) *)&(sk->sk_wmem_queued)); }) >= ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_388(void) ; if (!((sizeof(sk->sk_sndbuf) == sizeof(char) || sizeof(sk->sk_sndbuf) == sizeof(short) || sizeof(sk->sk_sndbuf) == sizeof(int) || sizeof(sk->sk_sndbuf) == sizeof(long)) || sizeof(sk->sk_sndbuf) == sizeof(long long))) __compiletime_assert_388(); } while (0); (*(const volatile typeof( _Generic((sk->sk_sndbuf), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_sndbuf))) *)&(sk->sk_sndbuf)); }))
return false;

return sk->__sk_common.skc_prot->stream_memory_free ?
sk->__sk_common.skc_prot->stream_memory_free(sk, wake) : true;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_stream_memory_free(const struct sock *sk)
{
return __sk_stream_memory_free(sk, 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __sk_stream_is_writeable(const struct sock *sk, int wake)
{
return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
__sk_stream_memory_free(sk, wake);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_stream_is_writeable(const struct sock *sk)
{
return __sk_stream_is_writeable(sk, 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sk_under_cgroup_hierarchy(struct sock *sk,
struct cgroup *ancestor)
{

return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
ancestor);



}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_has_memory_pressure(const struct sock *sk)
{
return sk->__sk_common.skc_prot->memory_pressure != ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_under_memory_pressure(const struct sock *sk)
{
if (!sk->__sk_common.skc_prot->memory_pressure)
return false;

if (0 && sk->sk_memcg &&
mem_cgroup_under_socket_pressure(sk->sk_memcg))
return true;

return !!*sk->__sk_common.skc_prot->memory_pressure;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long
sk_memory_allocated(const struct sock *sk)
{
return atomic_long_read(sk->__sk_common.skc_prot->memory_allocated);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long
sk_memory_allocated_add(struct sock *sk, int amt)
{
return atomic_long_add_return(amt, sk->__sk_common.skc_prot->memory_allocated);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
sk_memory_allocated_sub(struct sock *sk, int amt)
{
atomic_long_sub(amt, sk->__sk_common.skc_prot->memory_allocated);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_sockets_allocated_dec(struct sock *sk)
{
percpu_counter_add_batch(sk->__sk_common.skc_prot->sockets_allocated, -1,
16);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_sockets_allocated_inc(struct sock *sk)
{
percpu_counter_add_batch(sk->__sk_common.skc_prot->sockets_allocated, 1,
16);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64
sk_sockets_allocated_read_positive(struct sock *sk)
{
return percpu_counter_read_positive(sk->__sk_common.skc_prot->sockets_allocated);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
proto_sockets_allocated_sum_positive(struct proto *prot)
{
return percpu_counter_sum_positive(prot->sockets_allocated);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long
proto_memory_allocated(struct proto *prot)
{
return atomic_long_read(prot->memory_allocated);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
proto_memory_pressure(struct proto *prot)
{
if (!prot->memory_pressure)
return false;
return !!*prot->memory_pressure;
}




struct prot_inuse {
int all;
int val[64];
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_prot_inuse_add(const struct net *net,
const struct proto *prot, int val)
{
do { do { const void *__vpp_verify = (typeof((&(net->core.prot_inuse->val[prot->inuse_idx])) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(net->core.prot_inuse->val[prot->inuse_idx])) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(net->core.prot_inuse->val[prot->inuse_idx])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(net->core.prot_inuse->val[prot->inuse_idx]))) *)(&(net->core.prot_inuse->val[prot->inuse_idx]))); (typeof((typeof(*(&(net->core.prot_inuse->val[prot->inuse_idx]))) *)(&(net->core.prot_inuse->val[prot->inuse_idx])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += val; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(net->core.prot_inuse->val[prot->inuse_idx])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(net->core.prot_inuse->val[prot->inuse_idx]))) *)(&(net->core.prot_inuse->val[prot->inuse_idx]))); (typeof((typeof(*(&(net->core.prot_inuse->val[prot->inuse_idx]))) *)(&(net->core.prot_inuse->val[prot->inuse_idx])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += val; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(net->core.prot_inuse->val[prot->inuse_idx])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(net->core.prot_inuse->val[prot->inuse_idx]))) *)(&(net->core.prot_inuse->val[prot->inuse_idx]))); (typeof((typeof(*(&(net->core.prot_inuse->val[prot->inuse_idx]))) *)(&(net->core.prot_inuse->val[prot->inuse_idx])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += val; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(net->core.prot_inuse->val[prot->inuse_idx])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(net->core.prot_inuse->val[prot->inuse_idx]))) *)(&(net->core.prot_inuse->val[prot->inuse_idx]))); (typeof((typeof(*(&(net->core.prot_inuse->val[prot->inuse_idx]))) *)(&(net->core.prot_inuse->val[prot->inuse_idx])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += val; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_inuse_add(const struct net *net, int val)
{
do { do { const void *__vpp_verify = (typeof((&(net->core.prot_inuse->all)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(net->core.prot_inuse->all)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(net->core.prot_inuse->all)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(net->core.prot_inuse->all))) *)(&(net->core.prot_inuse->all))); (typeof((typeof(*(&(net->core.prot_inuse->all))) *)(&(net->core.prot_inuse->all)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += val; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(net->core.prot_inuse->all)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(net->core.prot_inuse->all))) *)(&(net->core.prot_inuse->all))); (typeof((typeof(*(&(net->core.prot_inuse->all))) *)(&(net->core.prot_inuse->all)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += val; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(net->core.prot_inuse->all)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(net->core.prot_inuse->all))) *)(&(net->core.prot_inuse->all))); (typeof((typeof(*(&(net->core.prot_inuse->all))) *)(&(net->core.prot_inuse->all)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += val; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(net->core.prot_inuse->all)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(net->core.prot_inuse->all))) *)(&(net->core.prot_inuse->all))); (typeof((typeof(*(&(net->core.prot_inuse->all))) *)(&(net->core.prot_inuse->all)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += val; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}
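/* NOTE: the switch (sizeof(...)) blocks in the two functions above, with
 * their arch_local_irq_save()/restore() brackets and __per_cpu_offset[]
 * pointer arithmetic, appear to be the generic irq-disabling fallback
 * expansion of this_cpu_add() used when the architecture provides no cheaper
 * per-cpu operation. */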

int sock_prot_inuse_get(struct net *net, struct proto *proto);
int sock_inuse_get(struct net *net);
# 1484 "./include/net/sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __sk_prot_rehash(struct sock *sk)
{
sk->__sk_common.skc_prot->unhash(sk);
return sk->__sk_common.skc_prot->hash(sk);
}
# 1503 "./include/net/sock.h"
struct socket_alloc {
struct socket socket;
struct inode vfs_inode;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct socket *SOCKET_I(struct inode *inode)
{
return &({ void *__mptr = (void *)(inode); _Static_assert(__builtin_types_compatible_p(typeof(*(inode)), typeof(((struct socket_alloc *)0)->vfs_inode)) || __builtin_types_compatible_p(typeof(*(inode)), typeof(void)), "pointer type mismatch in container_of()"); ((struct socket_alloc *)(__mptr - __builtin_offsetof(struct socket_alloc, vfs_inode))); })->socket;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct inode *SOCK_INODE(struct socket *socket)
{
return &({ void *__mptr = (void *)(socket); _Static_assert(__builtin_types_compatible_p(typeof(*(socket)), typeof(((struct socket_alloc *)0)->socket)) || __builtin_types_compatible_p(typeof(*(socket)), typeof(void)), "pointer type mismatch in container_of()"); ((struct socket_alloc *)(__mptr - __builtin_offsetof(struct socket_alloc, socket))); })->vfs_inode;
}
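/* NOTE: the ({ void *__mptr ... _Static_assert(__builtin_types_compatible_p
 * ...) }) blocks in SOCKET_I()/SOCK_INODE() above are the expansion of
 * container_of(), mapping between the embedded socket and vfs_inode members
 * of struct socket_alloc. */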




int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
int __sk_mem_schedule(struct sock *sk, int size, int kind);
void __sk_mem_reduce_allocated(struct sock *sk, int amount);
void __sk_mem_reclaim(struct sock *sk, int amount);
# 1535 "./include/net/sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long sk_prot_mem_limits(const struct sock *sk, int index)
{
long val = sk->__sk_common.skc_prot->sysctl_mem[index];






return val;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sk_mem_pages(int amt)
{
return (amt + 4096 - 1) >> ( __builtin_constant_p(4096) ? ((4096) < 2 ? 0 : 63 - __builtin_clzll(4096)) : (sizeof(4096) <= 4) ? __ilog2_u32(4096) : __ilog2_u64(4096) );
}
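/* NOTE: sk_mem_pages() rounds amt up to whole pages; the
 * __builtin_constant_p() ternary appears to be the expansion of ilog2(4096),
 * i.e. a right shift by PAGE_SHIFT (12) in this configuration. */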

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_has_account(struct sock *sk)
{

return !!sk->__sk_common.skc_prot->memory_allocated;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_wmem_schedule(struct sock *sk, int size)
{
if (!sk_has_account(sk))
return true;
return size <= sk->sk_forward_alloc ||
__sk_mem_schedule(sk, size, 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
{
if (!sk_has_account(sk))
return true;
return size <= sk->sk_forward_alloc ||
__sk_mem_schedule(sk, size, 1) ||
skb_pfmemalloc(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sk_unused_reserved_mem(const struct sock *sk)
{
int unused_mem;

if (__builtin_expect(!!(!sk->sk_reserved_mem), 1))
return 0;

unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued -
atomic_read(&sk->sk_backlog.rmem_alloc);

return unused_mem > 0 ? unused_mem : 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_mem_reclaim(struct sock *sk)
{
int reclaimable;

if (!sk_has_account(sk))
return;

reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);

if (reclaimable >= 4096)
__sk_mem_reclaim(sk, reclaimable);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_mem_reclaim_final(struct sock *sk)
{
sk->sk_reserved_mem = 0;
sk_mem_reclaim(sk);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_mem_reclaim_partial(struct sock *sk)
{
int reclaimable;

if (!sk_has_account(sk))
return;

reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);

if (reclaimable > 4096)
__sk_mem_reclaim(sk, reclaimable - 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_mem_charge(struct sock *sk, int size)
{
if (!sk_has_account(sk))
return;
sk->sk_forward_alloc -= size;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_mem_uncharge(struct sock *sk, int size)
{
int reclaimable;

if (!sk_has_account(sk))
return;
sk->sk_forward_alloc += size;
reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
# 1649 "./include/net/sock.h"
if (__builtin_expect(!!(reclaimable >= (1 << 21)), 0))
__sk_mem_reclaim(sk, (1 << 20));
}
# 1672 "./include/net/sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool lockdep_sock_is_held(const struct sock *sk)
{
return lock_is_held(&(&sk->sk_lock)->dep_map) ||
lock_is_held(&(&sk->sk_lock.slock)->dep_map);
}

void lock_sock_nested(struct sock *sk, int subclass);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void lock_sock(struct sock *sk)
{
lock_sock_nested(sk, 0);
}

void __lock_sock(struct sock *sk);
void __release_sock(struct sock *sk);
void release_sock(struct sock *sk);
# 1696 "./include/net/sock.h"
bool __lock_sock_fast(struct sock *sk);
# 1711 "./include/net/sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool lock_sock_fast(struct sock *sk)
{

lock_acquire(&sk->sk_lock.dep_map, 0, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));

return __lock_sock_fast(sk);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool lock_sock_fast_nested(struct sock *sk)
{
lock_acquire(&sk->sk_lock.dep_map, 1, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));

return __lock_sock_fast(sk);
}
# 1735 "./include/net/sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void unlock_sock_fast(struct sock *sk, bool slow)
{
if (slow) {
release_sock(sk);
(void)0;
} else {
lock_release(&sk->sk_lock.dep_map, (unsigned long)__builtin_return_address(0));
spin_unlock_bh(&sk->sk_lock.slock);
}
}
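/* NOTE: the stray "(void)0;" in the slow branch above appears to be the dummy
 * expansion of a sparse __release() annotation, which is a no-op unless the
 * file is built with __CHECKER__ defined. */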
# 1761 "./include/net/sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_owned_by_me(const struct sock *sk)
{

({ int __ret_warn_on = !!(!lockdep_sock_is_held(sk) && debug_locks); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/sock.h"), "i" (1764), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sock_owned_by_user(const struct sock *sk)
{
sock_owned_by_me(sk);
return sk->sk_lock.owned;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sock_owned_by_user_nocheck(const struct sock *sk)
{
return sk->sk_lock.owned;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_release_ownership(struct sock *sk)
{
if (sock_owned_by_user_nocheck(sk)) {
sk->sk_lock.owned = 0;


lock_release(&sk->sk_lock.dep_map, (unsigned long)__builtin_return_address(0));
}
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sock_allow_reclassification(const struct sock *csk)
{
struct sock *sk = (struct sock *)csk;

return !sock_owned_by_user_nocheck(sk) &&
!spin_is_locked(&sk->sk_lock.slock);
}

struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
struct proto *prot, int kern);
void sk_free(struct sock *sk);
void sk_destruct(struct sock *sk);
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
void sk_free_unlock_clone(struct sock *sk);

struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
gfp_t priority);
void __sock_wfree(struct sk_buff *skb);
void sock_wfree(struct sk_buff *skb);
struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
gfp_t priority);
void skb_orphan_partial(struct sk_buff *skb);
void sock_rfree(struct sk_buff *skb);
void sock_efree(struct sk_buff *skb);

void sock_edemux(struct sk_buff *skb);
void sock_pfree(struct sk_buff *skb);




int sock_setsockopt(struct socket *sock, int level, int op,
sockptr_t optval, unsigned int optlen);

int sock_getsockopt(struct socket *sock, int level, int op,
char *optval, int *optlen);
int sock_gettstamp(struct socket *sock, void *userstamp,
bool timeval, bool time32);
struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
int noblock, int *errcode);
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
unsigned long data_len, int noblock,
int *errcode, int max_page_order);
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
void sock_kfree_s(struct sock *sk, void *mem, int size);
void sock_kzfree_s(struct sock *sk, void *mem, int size);
void sk_send_sigurg(struct sock *sk);

struct sockcm_cookie {
u64 transmit_time;
u32 mark;
u16 tsflags;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sockcm_init(struct sockcm_cookie *sockc,
const struct sock *sk)
{
*sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
}

int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
struct sockcm_cookie *sockc);
int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
struct sockcm_cookie *sockc);





int sock_no_bind(struct socket *, struct sockaddr *, int);
int sock_no_connect(struct socket *, struct sockaddr *, int, int);
int sock_no_socketpair(struct socket *, struct socket *);
int sock_no_accept(struct socket *, struct socket *, int, bool);
int sock_no_getname(struct socket *, struct sockaddr *, int);
int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
int sock_no_listen(struct socket *, int);
int sock_no_shutdown(struct socket *, int);
int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
int sock_no_mmap(struct file *file, struct socket *sock,
struct vm_area_struct *vma);
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags);
ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
int offset, size_t size, int flags);





int sock_common_getsockopt(struct socket *sock, int level, int optname,
char *optval, int *optlen);
int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags);
int sock_common_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen);

void sk_common_release(struct sock *sk);






void sock_init_data(struct socket *sock, struct sock *sk);
# 1923 "./include/net/sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_put(struct sock *sk)
{
if (refcount_dec_and_test(&sk->__sk_common.skc_refcnt))
sk_free(sk);
}



void sock_gen_put(struct sock *sk);

int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
unsigned int trim_cap, bool refcounted);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
const int nested)
{
return __sk_receive_skb(sk, skb, nested, 1, true);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_tx_queue_set(struct sock *sk, int tx_queue)
{

if (({ int __ret_warn_on = !!((unsigned short)tx_queue >= ((unsigned short)~0U)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/sock.h"), "i" (1944), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }))
return;
sk->__sk_common.skc_tx_queue_mapping = tx_queue;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_tx_queue_clear(struct sock *sk)
{
sk->__sk_common.skc_tx_queue_mapping = ((unsigned short)~0U);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sk_tx_queue_get(const struct sock *sk)
{
if (sk && sk->__sk_common.skc_tx_queue_mapping != ((unsigned short)~0U))
return sk->__sk_common.skc_tx_queue_mapping;

return -1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __sk_rx_queue_set(struct sock *sk,
const struct sk_buff *skb,
bool force_set)
{

if (skb_rx_queue_recorded(skb)) {
u16 rx_queue = skb_get_rx_queue(skb);

if (force_set ||
__builtin_expect(!!(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_389(void) ; if (!((sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(char) || sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(short) || sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(int) || sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(long)) || sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(long long))) __compiletime_assert_389(); } while (0); (*(const volatile typeof( _Generic((sk->__sk_common.skc_rx_queue_mapping), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->__sk_common.skc_rx_queue_mapping))) *)&(sk->__sk_common.skc_rx_queue_mapping)); }) != rx_queue), 0))
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_390(void) ; if (!((sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(char) || sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(short) || sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(int) || sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(long)) || sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(long long))) __compiletime_assert_390(); } while (0); do { *(volatile typeof(sk->__sk_common.skc_rx_queue_mapping) *)&(sk->__sk_common.skc_rx_queue_mapping) = (rx_queue); } while (0); } while (0);
}

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
{
__sk_rx_queue_set(sk, skb, true);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb)
{
__sk_rx_queue_set(sk, skb, false);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_rx_queue_clear(struct sock *sk)
{

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_391(void) ; if (!((sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(char) || sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(short) || sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(int) || sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(long)) || sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(long long))) __compiletime_assert_391(); } while (0); do { *(volatile typeof(sk->__sk_common.skc_rx_queue_mapping) *)&(sk->__sk_common.skc_rx_queue_mapping) = (((unsigned short)~0U)); } while (0); } while (0);

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sk_rx_queue_get(const struct sock *sk)
{

if (sk) {
int res = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_392(void) ; if (!((sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(char) || sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(short) || sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(int) || sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(long)) || sizeof(sk->__sk_common.skc_rx_queue_mapping) == sizeof(long long))) __compiletime_assert_392(); } while (0); (*(const volatile typeof( _Generic((sk->__sk_common.skc_rx_queue_mapping), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->__sk_common.skc_rx_queue_mapping))) *)&(sk->__sk_common.skc_rx_queue_mapping)); });

if (res != ((unsigned short)~0U))
return res;
}


return -1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_set_socket(struct sock *sk, struct socket *sock)
{
sk->sk_socket = sock;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) wait_queue_head_t *sk_sleep(struct sock *sk)
{
do { __attribute__((__noreturn__)) extern void __compiletime_assert_393(void) ; if (!(!(__builtin_offsetof(struct socket_wq, wait) != 0))) __compiletime_assert_393(); } while (0);
return &({ typeof(sk->sk_wq) __UNIQUE_ID_rcu394 = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_395(void) ; if (!((sizeof(sk->sk_wq) == sizeof(char) || sizeof(sk->sk_wq) == sizeof(short) || sizeof(sk->sk_wq) == sizeof(int) || sizeof(sk->sk_wq) == sizeof(long)) || sizeof(sk->sk_wq) == sizeof(long long))) __compiletime_assert_395(); } while (0); (*(const volatile typeof( _Generic((sk->sk_wq), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_wq))) *)&(sk->sk_wq)); }); ((typeof(*sk->sk_wq) *)(__UNIQUE_ID_rcu394)); })->wait;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_orphan(struct sock *sk)
{
_raw_write_lock_bh(&sk->sk_callback_lock);
sock_set_flag(sk, SOCK_DEAD);
sk_set_socket(sk, ((void *)0));
sk->sk_wq = ((void *)0);
_raw_write_unlock_bh(&sk->sk_callback_lock);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_graft(struct sock *sk, struct socket *parent)
{
({ int __ret_warn_on = !!(parent->sk); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/sock.h"), "i" (2038), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
_raw_write_lock_bh(&sk->sk_callback_lock);
do { uintptr_t _r_a_p__v = (uintptr_t)(&parent->wq); ; if (__builtin_constant_p(&parent->wq) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_396(void) ; if (!((sizeof((sk->sk_wq)) == sizeof(char) || sizeof((sk->sk_wq)) == sizeof(short) || sizeof((sk->sk_wq)) == sizeof(int) || sizeof((sk->sk_wq)) == sizeof(long)) || sizeof((sk->sk_wq)) == sizeof(long long))) __compiletime_assert_396(); } while (0); do { *(volatile typeof((sk->sk_wq)) *)&((sk->sk_wq)) = ((typeof(sk->sk_wq))(_r_a_p__v)); } while (0); } while (0); else do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_397(void) ; if (!((sizeof(*&sk->sk_wq) == sizeof(char) || sizeof(*&sk->sk_wq) == sizeof(short) || sizeof(*&sk->sk_wq) == sizeof(int) || sizeof(*&sk->sk_wq) == sizeof(long)))) __compiletime_assert_397(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_398(void) ; if (!((sizeof(*&sk->sk_wq) == sizeof(char) || sizeof(*&sk->sk_wq) == sizeof(short) || sizeof(*&sk->sk_wq) == sizeof(int) || sizeof(*&sk->sk_wq) == sizeof(long)) || sizeof(*&sk->sk_wq) == sizeof(long long))) __compiletime_assert_398(); } while (0); do { *(volatile typeof(*&sk->sk_wq) *)&(*&sk->sk_wq) = ((typeof(*((typeof(sk->sk_wq))_r_a_p__v)) *)((typeof(sk->sk_wq))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); } while (0);
parent->sk = sk;
sk_set_socket(sk, parent);
sk->sk_uid = SOCK_INODE(parent)->i_uid;
security_sock_graft(sk, parent);
_raw_write_unlock_bh(&sk->sk_callback_lock);
}
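/* NOTE: the large do { ... } while (0) assignment in sock_graft() above
 * appears to be the expansion of rcu_assign_pointer(sk->sk_wq, &parent->wq);
 * the __asm__ __volatile__ ("fence rw,w") is the RISC-V release barrier from
 * smp_store_release(), ordering the waitqueue publication after prior
 * stores. */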

kuid_t sock_i_uid(struct sock *sk);
unsigned long sock_i_ino(struct sock *sk);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
{
return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 net_tx_rndhash(void)
{
u32 v = prandom_u32();

return v ?: 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_set_txhash(struct sock *sk)
{
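/* Expansion of WRITE_ONCE(sk->sk_txhash, net_tx_rndhash()): a size assert
 * plus a volatile store. */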

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_399(void) ; if (!((sizeof(sk->sk_txhash) == sizeof(char) || sizeof(sk->sk_txhash) == sizeof(short) || sizeof(sk->sk_txhash) == sizeof(int) || sizeof(sk->sk_txhash) == sizeof(long)) || sizeof(sk->sk_txhash) == sizeof(long long))) __compiletime_assert_399(); } while (0); do { *(volatile typeof(sk->sk_txhash) *)&(sk->sk_txhash) = (net_tx_rndhash()); } while (0); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_rethink_txhash(struct sock *sk)
{
if (sk->sk_txhash && sk->sk_txrehash == 1) {
sk_set_txhash(sk);
return true;
}
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct dst_entry *
__sk_dst_get(struct sock *sk)
{
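/* Expansion of rcu_dereference_check(sk->sk_dst_cache, lockdep_sock_is_held(sk)):
 * a volatile READ_ONCE() of the pointer; the lockdep check is compiled out in
 * this configuration (note the "0 &&" in the while condition). */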
return ({ typeof(*(sk->sk_dst_cache)) *__UNIQUE_ID_rcu400 = (typeof(*(sk->sk_dst_cache)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_401(void) ; if (!((sizeof((sk->sk_dst_cache)) == sizeof(char) || sizeof((sk->sk_dst_cache)) == sizeof(short) || sizeof((sk->sk_dst_cache)) == sizeof(int) || sizeof((sk->sk_dst_cache)) == sizeof(long)) || sizeof((sk->sk_dst_cache)) == sizeof(long long))) __compiletime_assert_401(); } while (0); (*(const volatile typeof( _Generic(((sk->sk_dst_cache)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((sk->sk_dst_cache)))) *)&((sk->sk_dst_cache))); }); do { } while (0 && (!((lockdep_sock_is_held(sk)) || rcu_read_lock_held()))); ; ((typeof(*(sk->sk_dst_cache)) *)(__UNIQUE_ID_rcu400)); });

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct dst_entry *
sk_dst_get(struct sock *sk)
{
struct dst_entry *dst;

rcu_read_lock();
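/* rcu_dereference(sk->sk_dst_cache), expanded inline as above. */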
dst = ({ typeof(*(sk->sk_dst_cache)) *__UNIQUE_ID_rcu402 = (typeof(*(sk->sk_dst_cache)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_403(void) ; if (!((sizeof((sk->sk_dst_cache)) == sizeof(char) || sizeof((sk->sk_dst_cache)) == sizeof(short) || sizeof((sk->sk_dst_cache)) == sizeof(int) || sizeof((sk->sk_dst_cache)) == sizeof(long)) || sizeof((sk->sk_dst_cache)) == sizeof(long long))) __compiletime_assert_403(); } while (0); (*(const volatile typeof( _Generic(((sk->sk_dst_cache)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((sk->sk_dst_cache)))) *)&((sk->sk_dst_cache))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(sk->sk_dst_cache)) *)(__UNIQUE_ID_rcu402)); });
if (dst && !atomic_inc_not_zero(&dst->__refcnt))
dst = ((void *)0);
rcu_read_unlock();
return dst;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __dst_negative_advice(struct sock *sk)
{
struct dst_entry *ndst, *dst = __sk_dst_get(sk);

if (dst && dst->ops->negative_advice) {
ndst = dst->ops->negative_advice(dst);

if (ndst != dst) {
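/* rcu_assign_pointer(sk->sk_dst_cache, ndst), expanded as in sock_graft() above. */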
do { uintptr_t _r_a_p__v = (uintptr_t)(ndst); ; if (__builtin_constant_p(ndst) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_404(void) ; if (!((sizeof((sk->sk_dst_cache)) == sizeof(char) || sizeof((sk->sk_dst_cache)) == sizeof(short) || sizeof((sk->sk_dst_cache)) == sizeof(int) || sizeof((sk->sk_dst_cache)) == sizeof(long)) || sizeof((sk->sk_dst_cache)) == sizeof(long long))) __compiletime_assert_404(); } while (0); do { *(volatile typeof((sk->sk_dst_cache)) *)&((sk->sk_dst_cache)) = ((typeof(sk->sk_dst_cache))(_r_a_p__v)); } while (0); } while (0); else do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_405(void) ; if (!((sizeof(*&sk->sk_dst_cache) == sizeof(char) || sizeof(*&sk->sk_dst_cache) == sizeof(short) || sizeof(*&sk->sk_dst_cache) == sizeof(int) || sizeof(*&sk->sk_dst_cache) == sizeof(long)))) __compiletime_assert_405(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_406(void) ; if (!((sizeof(*&sk->sk_dst_cache) == sizeof(char) || sizeof(*&sk->sk_dst_cache) == sizeof(short) || sizeof(*&sk->sk_dst_cache) == sizeof(int) || sizeof(*&sk->sk_dst_cache) == sizeof(long)) || sizeof(*&sk->sk_dst_cache) == sizeof(long long))) __compiletime_assert_406(); } while (0); do { *(volatile typeof(*&sk->sk_dst_cache) *)&(*&sk->sk_dst_cache) = ((typeof(*((typeof(sk->sk_dst_cache))_r_a_p__v)) *)((typeof(sk->sk_dst_cache))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); } while (0);
sk_tx_queue_clear(sk);
sk->sk_dst_pending_confirm = 0;
}
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dst_negative_advice(struct sock *sk)
{
sk_rethink_txhash(sk);
__dst_negative_advice(sk);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
struct dst_entry *old_dst;

sk_tx_queue_clear(sk);
sk->sk_dst_pending_confirm = 0;
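/* The next statement is rcu_dereference_protected(sk->sk_dst_cache,
 * lockdep_sock_is_held(sk)): a plain load with no barrier, since the caller
 * holds the socket lock. */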
old_dst = ({ do { } while (0 && (!((lockdep_sock_is_held(sk))))); ; ((typeof(*(sk->sk_dst_cache)) *)((sk->sk_dst_cache))); });
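/* rcu_assign_pointer(sk->sk_dst_cache, dst), expanded as above. */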

do { uintptr_t _r_a_p__v = (uintptr_t)(dst); ; if (__builtin_constant_p(dst) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_407(void) ; if (!((sizeof((sk->sk_dst_cache)) == sizeof(char) || sizeof((sk->sk_dst_cache)) == sizeof(short) || sizeof((sk->sk_dst_cache)) == sizeof(int) || sizeof((sk->sk_dst_cache)) == sizeof(long)) || sizeof((sk->sk_dst_cache)) == sizeof(long long))) __compiletime_assert_407(); } while (0); do { *(volatile typeof((sk->sk_dst_cache)) *)&((sk->sk_dst_cache)) = ((typeof(sk->sk_dst_cache))(_r_a_p__v)); } while (0); } while (0); else do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_408(void) ; if (!((sizeof(*&sk->sk_dst_cache) == sizeof(char) || sizeof(*&sk->sk_dst_cache) == sizeof(short) || sizeof(*&sk->sk_dst_cache) == sizeof(int) || sizeof(*&sk->sk_dst_cache) == sizeof(long)))) __compiletime_assert_408(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_409(void) ; if (!((sizeof(*&sk->sk_dst_cache) == sizeof(char) || sizeof(*&sk->sk_dst_cache) == sizeof(short) || sizeof(*&sk->sk_dst_cache) == sizeof(int) || sizeof(*&sk->sk_dst_cache) == sizeof(long)) || sizeof(*&sk->sk_dst_cache) == sizeof(long long))) __compiletime_assert_409(); } while (0); do { *(volatile typeof(*&sk->sk_dst_cache) *)&(*&sk->sk_dst_cache) = ((typeof(*((typeof(sk->sk_dst_cache))_r_a_p__v)) *)((typeof(sk->sk_dst_cache))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); } while (0);
dst_release(old_dst);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
struct dst_entry *old_dst;

sk_tx_queue_clear(sk);
sk->sk_dst_pending_confirm = 0;
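/* Expansion of xchg(&sk->sk_dst_cache, dst): after the KCSAN instrumentation
 * hook this becomes a single "amoswap.d.aqrl" on RISC-V (the .w variant is
 * selected for 4-byte operands by the switch on sizeof). */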
old_dst = ({ typeof(( struct dst_entry **)&sk->sk_dst_cache) __ai_ptr = (( struct dst_entry **)&sk->sk_dst_cache); do { } while (0); instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); ({ __typeof__(*(__ai_ptr)) _x_ = (dst); (__typeof__(*(__ai_ptr))) ({ __typeof__((__ai_ptr)) __ptr = ((__ai_ptr)); __typeof__(_x_) __new = (_x_); __typeof__(*((__ai_ptr))) __ret; switch (sizeof(*(__ai_ptr))) { case 4: __asm__ __volatile__ ( " amoswap.w.aqrl %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( " amoswap.d.aqrl %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_410(void) ; if (!(!(1))) __compiletime_assert_410(); } while (0); } __ret; }); }); });
dst_release(old_dst);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
__sk_dst_reset(struct sock *sk)
{
__sk_dst_set(sk, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
sk_dst_reset(struct sock *sk)
{
sk_dst_set(sk, ((void *)0));
}

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_dst_confirm(struct sock *sk)
{
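/* if (!READ_ONCE(sk->sk_dst_pending_confirm))
 *         WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
 * Both accessors expand to a size assert plus a volatile access. */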
if (!({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_411(void) ; if (!((sizeof(sk->sk_dst_pending_confirm) == sizeof(char) || sizeof(sk->sk_dst_pending_confirm) == sizeof(short) || sizeof(sk->sk_dst_pending_confirm) == sizeof(int) || sizeof(sk->sk_dst_pending_confirm) == sizeof(long)) || sizeof(sk->sk_dst_pending_confirm) == sizeof(long long))) __compiletime_assert_411(); } while (0); (*(const volatile typeof( _Generic((sk->sk_dst_pending_confirm), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_dst_pending_confirm))) *)&(sk->sk_dst_pending_confirm)); }))
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_412(void) ; if (!((sizeof(sk->sk_dst_pending_confirm) == sizeof(char) || sizeof(sk->sk_dst_pending_confirm) == sizeof(short) || sizeof(sk->sk_dst_pending_confirm) == sizeof(int) || sizeof(sk->sk_dst_pending_confirm) == sizeof(long)) || sizeof(sk->sk_dst_pending_confirm) == sizeof(long long))) __compiletime_assert_412(); } while (0); do { *(volatile typeof(sk->sk_dst_pending_confirm) *)&(sk->sk_dst_pending_confirm) = (1); } while (0); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
{
if (skb_get_dst_pending_confirm(skb)) {
struct sock *sk = skb->sk;

if (sk && ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_413(void) ; if (!((sizeof(sk->sk_dst_pending_confirm) == sizeof(char) || sizeof(sk->sk_dst_pending_confirm) == sizeof(short) || sizeof(sk->sk_dst_pending_confirm) == sizeof(int) || sizeof(sk->sk_dst_pending_confirm) == sizeof(long)) || sizeof(sk->sk_dst_pending_confirm) == sizeof(long long))) __compiletime_assert_413(); } while (0); (*(const volatile typeof( _Generic((sk->sk_dst_pending_confirm), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_dst_pending_confirm))) *)&(sk->sk_dst_pending_confirm)); }))
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_414(void) ; if (!((sizeof(sk->sk_dst_pending_confirm) == sizeof(char) || sizeof(sk->sk_dst_pending_confirm) == sizeof(short) || sizeof(sk->sk_dst_pending_confirm) == sizeof(int) || sizeof(sk->sk_dst_pending_confirm) == sizeof(long)) || sizeof(sk->sk_dst_pending_confirm) == sizeof(long long))) __compiletime_assert_414(); } while (0); do { *(volatile typeof(sk->sk_dst_pending_confirm) *)&(sk->sk_dst_pending_confirm) = (0); } while (0); } while (0);
neigh_confirm(n);
}
}

bool sk_mc_loop(struct sock *sk);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_can_gso(const struct sock *sk)
{
return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

void sk_setup_caps(struct sock *sk, struct dst_entry *dst);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_gso_disable(struct sock *sk)
{
sk->sk_gso_disabled = 1;
sk->sk_route_caps &= ~(((netdev_features_t)1 << (NETIF_F_GSO_LAST + 1)) - ((netdev_features_t)1 << (NETIF_F_GSO_SHIFT)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
struct iov_iter *from, char *to,
int copy, int offset)
{
if (skb->ip_summed == 0) {
__wsum csum = 0;
if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
return -14;
skb->csum = csum_block_add(skb->csum, csum, offset);
} else if (sk->sk_route_caps & ((netdev_features_t)1 << (NETIF_F_NOCACHE_COPY_BIT))) {
if (!copy_from_iter_full_nocache(to, copy, from))
return -14;
} else if (!copy_from_iter_full(to, copy, from))
return -14;

return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
struct iov_iter *from, int copy)
{
int err, offset = skb->len;

err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
copy, offset);
if (err)
__skb_trim(skb, offset);

return err;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
struct sk_buff *skb,
struct page *page,
int off, int copy)
{
int err;

err = skb_do_copy_data_nocache(sk, skb, from, lowmem_page_address(page) + off,
copy, skb->len);
if (err)
return err;

skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
sk_wmem_queued_add(sk, copy);
sk_mem_charge(sk, copy);
return 0;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sk_wmem_alloc_get(const struct sock *sk)
{
return refcount_read(&sk->sk_wmem_alloc) - 1;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sk_rmem_alloc_get(const struct sock *sk)
{
return atomic_read(&sk->sk_backlog.rmem_alloc);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_has_allocations(const struct sock *sk)
{
return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}
# 2306 "./include/net/sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skwq_has_sleeper(struct socket_wq *wq)
{
return wq && wq_has_sleeper(&wq->wait);
}
# 2319 "./include/net/sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_poll_wait(struct file *filp, struct socket *sock,
poll_table *p)
{
if (!poll_does_not_wait(p)) {
poll_wait(filp, &sock->wq.wait, p);
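/* The "fence rw,rw" below is smp_mb(), which the kernel pairs with the
 * barrier in wq_has_sleeper(). */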





do { do { } while (0); __asm__ __volatile__ ("fence " "rw" "," "rw" : : : "memory"); } while (0);
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
{
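/* u32 txhash = READ_ONCE(sk->sk_txhash); expanded below. */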

u32 txhash = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_415(void) ; if (!((sizeof(sk->sk_txhash) == sizeof(char) || sizeof(sk->sk_txhash) == sizeof(short) || sizeof(sk->sk_txhash) == sizeof(int) || sizeof(sk->sk_txhash) == sizeof(long)) || sizeof(sk->sk_txhash) == sizeof(long long))) __compiletime_assert_415(); } while (0); (*(const volatile typeof( _Generic((sk->sk_txhash), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_txhash))) *)&(sk->sk_txhash)); });

if (txhash) {
skb->l4_hash = 1;
skb->hash = txhash;
}
}

void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);
# 2354 "./include/net/sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
skb_orphan(skb);
skb->sk = sk;
skb->destructor = sock_rfree;
atomic_add(skb->truesize, &sk->sk_backlog.rmem_alloc);
sk_mem_charge(sk, skb->truesize);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__)) bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
{
if (sk && refcount_inc_not_zero(&sk->__sk_common.skc_refcnt)) {
skb_orphan(skb);
skb->destructor = sock_efree;
skb->sk = sk;
return true;
}
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_prepare_for_gro(struct sk_buff *skb)
{
if (skb->destructor != sock_wfree) {
skb_orphan(skb);
return;
}
skb->slow_gro = 1;
}

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
unsigned long expires);

void sk_stop_timer(struct sock *sk, struct timer_list *timer);

void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer);

int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
struct sk_buff *skb, unsigned int flags,
void (*destructor)(struct sock *sk,
struct sk_buff *skb));
int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
struct sk_buff *sock_dequeue_err_skb(struct sock *sk);





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sock_error(struct sock *sk)
{
int err;
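/* The likely(!sk->sk_err) test below is wrapped in data_race() (the
 * __kcsan_disable/enable pair), and the error is consumed with
 * xchg(&sk->sk_err, 0) -- an "amoswap.w.aqrl" for the 4-byte field on RISC-V. */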




if (__builtin_expect(!!(({ typeof( _Generic((({ !sk->sk_err; })), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (({ !sk->sk_err; })))) __v = ({ __kcsan_disable_current(); !sk->sk_err; }); __kcsan_enable_current(); __v; })), 1))
return 0;

err = ({ typeof(&sk->sk_err) __ai_ptr = (&sk->sk_err); do { } while (0); instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); ({ __typeof__(*(__ai_ptr)) _x_ = (0); (__typeof__(*(__ai_ptr))) ({ __typeof__((__ai_ptr)) __ptr = ((__ai_ptr)); __typeof__(_x_) __new = (_x_); __typeof__(*((__ai_ptr))) __ret; switch (sizeof(*(__ai_ptr))) { case 4: __asm__ __volatile__ ( " amoswap.w.aqrl %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( " amoswap.d.aqrl %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_416(void) ; if (!(!(1))) __compiletime_assert_416(); } while (0); } __ret; }); }); });
return -err;
}

void sk_error_report(struct sock *sk);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long sock_wspace(struct sock *sk)
{
int amt = 0;

if (!(sk->sk_shutdown & 2)) {
amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
if (amt < 0)
amt = 0;
}
return amt;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_set_bit(int nr, struct sock *sk)
{
if ((nr == 0 || nr == 1) &&
!sock_flag(sk, SOCK_FASYNC))
return;

set_bit(nr, &sk->sk_wq_raw->flags);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_clear_bit(int nr, struct sock *sk)
{
if ((nr == 0 || nr == 1) &&
!sock_flag(sk, SOCK_FASYNC))
return;

clear_bit(nr, &sk->sk_wq_raw->flags);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_wake_async(const struct sock *sk, int how, int band)
{
if (sock_flag(sk, SOCK_FASYNC)) {
rcu_read_lock();
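/* sock_wake_async(rcu_dereference(sk->sk_wq), how, band), with the
 * dereference expanded inline. */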
sock_wake_async(({ typeof(*(sk->sk_wq)) *__UNIQUE_ID_rcu417 = (typeof(*(sk->sk_wq)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_418(void) ; if (!((sizeof((sk->sk_wq)) == sizeof(char) || sizeof((sk->sk_wq)) == sizeof(short) || sizeof((sk->sk_wq)) == sizeof(int) || sizeof((sk->sk_wq)) == sizeof(long)) || sizeof((sk->sk_wq)) == sizeof(long long))) __compiletime_assert_418(); } while (0); (*(const volatile typeof( _Generic(((sk->sk_wq)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((sk->sk_wq)))) *)&((sk->sk_wq))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(sk->sk_wq)) *)(__UNIQUE_ID_rcu417)); }), how, band);
rcu_read_unlock();
}
}
# 2473 "./include/net/sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_stream_moderate_sndbuf(struct sock *sk)
{
u32 val;

if (sk->sk_userlocks & 1)
return;
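/* The next three statements appear to correspond to:
 *   val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
 *   val = max_t(u32, val, sk_unused_reserved_mem(sk));
 *   WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
 * The kernel's type-checked min()/max() macros account for the
 * __builtin_choose_expr noise. */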

val = __builtin_choose_expr(((!!(sizeof((typeof(sk->sk_sndbuf) *)1 == (typeof(sk->sk_wmem_queued >> 1) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(sk->sk_sndbuf) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(sk->sk_wmem_queued >> 1) * 0l)) : (int *)8))))), ((sk->sk_sndbuf) < (sk->sk_wmem_queued >> 1) ? (sk->sk_sndbuf) : (sk->sk_wmem_queued >> 1)), ({ typeof(sk->sk_sndbuf) __UNIQUE_ID___x419 = (sk->sk_sndbuf); typeof(sk->sk_wmem_queued >> 1) __UNIQUE_ID___y420 = (sk->sk_wmem_queued >> 1); ((__UNIQUE_ID___x419) < (__UNIQUE_ID___y420) ? (__UNIQUE_ID___x419) : (__UNIQUE_ID___y420)); }));
val = __builtin_choose_expr(((!!(sizeof((typeof((u32)(val)) *)1 == (typeof((u32)(sk_unused_reserved_mem(sk))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((u32)(val)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((u32)(sk_unused_reserved_mem(sk))) * 0l)) : (int *)8))))), (((u32)(val)) > ((u32)(sk_unused_reserved_mem(sk))) ? ((u32)(val)) : ((u32)(sk_unused_reserved_mem(sk)))), ({ typeof((u32)(val)) __UNIQUE_ID___x421 = ((u32)(val)); typeof((u32)(sk_unused_reserved_mem(sk))) __UNIQUE_ID___y422 = ((u32)(sk_unused_reserved_mem(sk))); ((__UNIQUE_ID___x421) > (__UNIQUE_ID___y422) ? (__UNIQUE_ID___x421) : (__UNIQUE_ID___y422)); }));

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_425(void) ; if (!((sizeof(sk->sk_sndbuf) == sizeof(char) || sizeof(sk->sk_sndbuf) == sizeof(short) || sizeof(sk->sk_sndbuf) == sizeof(int) || sizeof(sk->sk_sndbuf) == sizeof(long)) || sizeof(sk->sk_sndbuf) == sizeof(long long))) __compiletime_assert_425(); } while (0); do { *(volatile typeof(sk->sk_sndbuf) *)&(sk->sk_sndbuf) = (__builtin_choose_expr(((!!(sizeof((typeof((u32)(val)) *)1 == (typeof((u32)(((2048 + ((((sizeof(struct sk_buff))) + ((typeof((sizeof(struct sk_buff))))(((1 << 6))) - 1)) & ~((typeof((sizeof(struct sk_buff))))(((1 << 6))) - 1))) * 2))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((u32)(val)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((u32)(((2048 + ((((sizeof(struct sk_buff))) + ((typeof((sizeof(struct sk_buff))))(((1 << 6))) - 1)) & ~((typeof((sizeof(struct sk_buff))))(((1 << 6))) - 1))) * 2))) * 0l)) : (int *)8))))), (((u32)(val)) > ((u32)(((2048 + ((((sizeof(struct sk_buff))) + ((typeof((sizeof(struct sk_buff))))(((1 << 6))) - 1)) & ~((typeof((sizeof(struct sk_buff))))(((1 << 6))) - 1))) * 2))) ? ((u32)(val)) : ((u32)(((2048 + ((((sizeof(struct sk_buff))) + ((typeof((sizeof(struct sk_buff))))(((1 << 6))) - 1)) & ~((typeof((sizeof(struct sk_buff))))(((1 << 6))) - 1))) * 2)))), ({ typeof((u32)(val)) __UNIQUE_ID___x423 = ((u32)(val)); typeof((u32)(((2048 + ((((sizeof(struct sk_buff))) + ((typeof((sizeof(struct sk_buff))))(((1 << 6))) - 1)) & ~((typeof((sizeof(struct sk_buff))))(((1 << 6))) - 1))) * 2))) __UNIQUE_ID___y424 = ((u32)(((2048 + ((((sizeof(struct sk_buff))) + ((typeof((sizeof(struct sk_buff))))(((1 << 6))) - 1)) & ~((typeof((sizeof(struct sk_buff))))(((1 << 6))) - 1))) * 2))); ((__UNIQUE_ID___x423) > (__UNIQUE_ID___y424) ? (__UNIQUE_ID___x423) : (__UNIQUE_ID___y424)); }))); } while (0); } while (0);
}
# 2503 "./include/net/sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct page_frag *sk_page_frag(struct sock *sk)
{
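/* Decoding the gfp bits below on this kernel: 0x400 is ___GFP_DIRECT_RECLAIM,
 * 0x20000 is ___GFP_MEMALLOC, 0x80 is ___GFP_FS -- the per-task page_frag is
 * only used for GFP_KERNEL-style allocations. */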
if ((sk->sk_allocation & ((( gfp_t)0x400u) | (( gfp_t)0x20000u) | (( gfp_t)0x80u))) ==
((( gfp_t)0x400u) | (( gfp_t)0x80u)))
return &get_current()->task_frag;

return &sk->sk_frag;
}

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sock_writeable(const struct sock *sk)
{
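/* return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1); */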
return refcount_read(&sk->sk_wmem_alloc) < (({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_426(void) ; if (!((sizeof(sk->sk_sndbuf) == sizeof(char) || sizeof(sk->sk_sndbuf) == sizeof(short) || sizeof(sk->sk_sndbuf) == sizeof(int) || sizeof(sk->sk_sndbuf) == sizeof(long)) || sizeof(sk->sk_sndbuf) == sizeof(long long))) __compiletime_assert_426(); } while (0); (*(const volatile typeof( _Generic((sk->sk_sndbuf), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_sndbuf))) *)&(sk->sk_sndbuf)); }) >> 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) gfp_t gfp_any(void)
{
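/* in_softirq() ? GFP_ATOMIC : GFP_KERNEL -- the preempt_count() mask selects
 * the softirq count bits. */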
return ((preempt_count() & (((1UL << (8))-1) << (0 + 8)))) ? ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)) : ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) gfp_t gfp_memcg_charge(void)
{
return ((preempt_count() & (((1UL << (8))-1) << (0 + 8)))) ? ((( gfp_t)0x800u)) : ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long sock_rcvtimeo(const struct sock *sk, bool noblock)
{
return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long sock_sndtimeo(const struct sock *sk, bool noblock)
{
return noblock ? 0 : sk->sk_sndtimeo;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
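/* int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len);
 * The min_t() expansion repeats the READ_ONCE body several times, hence the
 * length of the line below. */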
int v = waitall ? len : __builtin_choose_expr(((!!(sizeof((typeof((int)(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_427(void) ; if (!((sizeof(sk->sk_rcvlowat) == sizeof(char) || sizeof(sk->sk_rcvlowat) == sizeof(short) || sizeof(sk->sk_rcvlowat) == sizeof(int) || sizeof(sk->sk_rcvlowat) == sizeof(long)) || sizeof(sk->sk_rcvlowat) == sizeof(long long))) __compiletime_assert_427(); } while (0); (*(const volatile typeof( _Generic((sk->sk_rcvlowat), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvlowat))) *)&(sk->sk_rcvlowat)); }))) *)1 == (typeof((int)(len)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_427(void) ; if (!((sizeof(sk->sk_rcvlowat) == sizeof(char) || sizeof(sk->sk_rcvlowat) == sizeof(short) || sizeof(sk->sk_rcvlowat) == sizeof(int) || sizeof(sk->sk_rcvlowat) == sizeof(long)) || sizeof(sk->sk_rcvlowat) == sizeof(long long))) __compiletime_assert_427(); } while (0); (*(const volatile typeof( _Generic((sk->sk_rcvlowat), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvlowat))) *)&(sk->sk_rcvlowat)); }))) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(len)) * 0l)) : (int *)8))))), (((int)(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_427(void) ; if (!((sizeof(sk->sk_rcvlowat) == sizeof(char) || sizeof(sk->sk_rcvlowat) == sizeof(short) || sizeof(sk->sk_rcvlowat) == sizeof(int) || sizeof(sk->sk_rcvlowat) == sizeof(long)) || sizeof(sk->sk_rcvlowat) == sizeof(long long))) __compiletime_assert_427(); } while (0); (*(const volatile typeof( _Generic((sk->sk_rcvlowat), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvlowat))) *)&(sk->sk_rcvlowat)); }))) < ((int)(len)) ? 
((int)(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_427(void) ; if (!((sizeof(sk->sk_rcvlowat) == sizeof(char) || sizeof(sk->sk_rcvlowat) == sizeof(short) || sizeof(sk->sk_rcvlowat) == sizeof(int) || sizeof(sk->sk_rcvlowat) == sizeof(long)) || sizeof(sk->sk_rcvlowat) == sizeof(long long))) __compiletime_assert_427(); } while (0); (*(const volatile typeof( _Generic((sk->sk_rcvlowat), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvlowat))) *)&(sk->sk_rcvlowat)); }))) : ((int)(len))), ({ typeof((int)(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_427(void) ; if (!((sizeof(sk->sk_rcvlowat) == sizeof(char) || sizeof(sk->sk_rcvlowat) == sizeof(short) || sizeof(sk->sk_rcvlowat) == sizeof(int) || sizeof(sk->sk_rcvlowat) == sizeof(long)) || sizeof(sk->sk_rcvlowat) == sizeof(long long))) __compiletime_assert_427(); } while (0); (*(const volatile typeof( _Generic((sk->sk_rcvlowat), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvlowat))) *)&(sk->sk_rcvlowat)); }))) __UNIQUE_ID___x428 = ((int)(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_427(void) ; if (!((sizeof(sk->sk_rcvlowat) == sizeof(char) || sizeof(sk->sk_rcvlowat) == sizeof(short) || sizeof(sk->sk_rcvlowat) == sizeof(int) || sizeof(sk->sk_rcvlowat) == sizeof(long)) || sizeof(sk->sk_rcvlowat) == sizeof(long long))) __compiletime_assert_427(); } while (0); (*(const volatile typeof( _Generic((sk->sk_rcvlowat), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvlowat))) *)&(sk->sk_rcvlowat)); }))); typeof((int)(len)) __UNIQUE_ID___y429 = ((int)(len)); ((__UNIQUE_ID___x428) < (__UNIQUE_ID___y429) ? (__UNIQUE_ID___x428) : (__UNIQUE_ID___y429)); }));

return v ?: 1;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sock_intr_errno(long timeo)
{
return timeo == ((long)(~0UL >> 1)) ? -512 : -4;
}

struct sock_skb_cb {
u32 dropcount;
};
# 2574 "./include/net/sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
{
((struct sock_skb_cb *)((skb)->cb + ((sizeof((((struct sk_buff *)0)->cb)) - sizeof(struct sock_skb_cb)))))->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
atomic_read(&sk->sk_drops) : 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
{
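/* int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs); expanded below. */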
int segs = __builtin_choose_expr(((!!(sizeof((typeof((u16)(1)) *)1 == (typeof((u16)(((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((u16)(1)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((u16)(((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs)) * 0l)) : (int *)8))))), (((u16)(1)) > ((u16)(((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs)) ? ((u16)(1)) : ((u16)(((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs))), ({ typeof((u16)(1)) __UNIQUE_ID___x430 = ((u16)(1)); typeof((u16)(((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs)) __UNIQUE_ID___y431 = ((u16)(((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs)); ((__UNIQUE_ID___x430) > (__UNIQUE_ID___y431) ? (__UNIQUE_ID___x430) : (__UNIQUE_ID___y431)); }));

atomic_add(segs, &sk->sk_drops);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ktime_t sock_read_timestamp(struct sock *sk)
{
# 2601 "./include/net/sock.h"
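/* return READ_ONCE(sk->sk_stamp); */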
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_432(void) ; if (!((sizeof(sk->sk_stamp) == sizeof(char) || sizeof(sk->sk_stamp) == sizeof(short) || sizeof(sk->sk_stamp) == sizeof(int) || sizeof(sk->sk_stamp) == sizeof(long)) || sizeof(sk->sk_stamp) == sizeof(long long))) __compiletime_assert_432(); } while (0); (*(const volatile typeof( _Generic((sk->sk_stamp), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_stamp))) *)&(sk->sk_stamp)); });

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_write_timestamp(struct sock *sk, ktime_t kt)
{
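/* WRITE_ONCE(sk->sk_stamp, kt); */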





do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_433(void) ; if (!((sizeof(sk->sk_stamp) == sizeof(char) || sizeof(sk->sk_stamp) == sizeof(short) || sizeof(sk->sk_stamp) == sizeof(int) || sizeof(sk->sk_stamp) == sizeof(long)) || sizeof(sk->sk_stamp) == sizeof(long long))) __compiletime_assert_433(); } while (0); do { *(volatile typeof(sk->sk_stamp) *)&(sk->sk_stamp) = (kt); } while (0); } while (0);

}

void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
ktime_t kt = skb->tstamp;
struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);







if (sock_flag(sk, SOCK_RCVTSTAMP) ||
(sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
(kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
(hwtstamps->hwtstamp &&
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
__sock_recv_timestamp(msg, sk, skb);
else
sock_write_timestamp(sk, kt);

if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
__sock_recv_wifi_status(msg, sk, skb);
}

void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{





if (sk->__sk_common.skc_flags & ((1UL << SOCK_RXQ_OVFL) | (1UL << SOCK_RCVTSTAMP)) || sk->sk_tsflags & (SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE))
__sock_recv_ts_and_drops(msg, sk, skb);
else if (__builtin_expect(!!(sock_flag(sk, SOCK_TIMESTAMP)), 0))
sock_write_timestamp(sk, skb->tstamp);
else if (__builtin_expect(!!(sk->sk_stamp == (-1L * 1000000000L)), 0))
sock_write_timestamp(sk, 0);
}

void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
# 2677 "./include/net/sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
__u8 *tx_flags, __u32 *tskey)
{
if (__builtin_expect(!!(tsflags), 0)) {
__sock_tx_timestamp(tsflags, tx_flags);
if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
tsflags & (SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_TX_SCHED | SOF_TIMESTAMPING_TX_ACK))
*tskey = atomic_inc_return(&sk->sk_tskey) - 1;
}
if (__builtin_expect(!!(sock_flag(sk, SOCK_WIFI_STATUS)), 0))
*tx_flags |= SKBTX_WIFI_STATUS;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
__u8 *tx_flags)
{
_sock_tx_timestamp(sk, tsflags, tx_flags, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
{
_sock_tx_timestamp(skb->sk, tsflags, &((struct skb_shared_info *)(skb_end_pointer(skb)))->tx_flags,
&((struct skb_shared_info *)(skb_end_pointer(skb)))->tskey);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_is_tcp(const struct sock *sk)
{
return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
}
# 2715 "./include/net/sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_unlink(skb, &sk->sk_receive_queue);
__kfree_skb(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
skb_sk_is_prefetched(struct sk_buff *skb)
{

return skb->destructor == sock_pfree;



}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_fullsock(const struct sock *sk)
{
return (1 << sk->__sk_common.skc_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
sk_is_refcounted(struct sock *sk)
{

return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sock *
skb_steal_sock(struct sk_buff *skb, bool *refcounted)
{
if (skb->sk) {
struct sock *sk = skb->sk;

*refcounted = true;
if (skb_sk_is_prefetched(skb))
*refcounted = sk_is_refcounted(sk);
skb->destructor = ((void *)0);
skb->sk = ((void *)0);
return sk;
}
*refcounted = false;
return ((void *)0);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
struct net_device *dev)
{
# 2789 "./include/net/sock.h"
return skb;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_listener(const struct sock *sk)
{
return (1 << sk->__sk_common.skc_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
}

void sock_enable_timestamp(struct sock *sk, enum sock_flags flag);
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
int type);

bool sk_ns_capable(const struct sock *sk,
struct user_namespace *user_ns, int cap);
bool sk_capable(const struct sock *sk, int cap);
bool sk_net_capable(const struct sock *sk, int cap);

void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
# 2821 "./include/net/sock.h"
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

extern int sysctl_tstamp_allow_data;
extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;


extern struct static_key_false net_high_order_alloc_disable_key;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
{

if (proto->sysctl_wmem_offset)
return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);

return *proto->sysctl_wmem;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
{

if (proto->sysctl_rmem_offset)
return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);

return *proto->sysctl_rmem;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_pacing_shift_update(struct sock *sk, int val)
{
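/* READ_ONCE()/WRITE_ONCE() of sk->sk_pacing_shift, expanded below. */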
if (!sk || !sk_fullsock(sk) || ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_434(void) ; if (!((sizeof(sk->sk_pacing_shift) == sizeof(char) || sizeof(sk->sk_pacing_shift) == sizeof(short) || sizeof(sk->sk_pacing_shift) == sizeof(int) || sizeof(sk->sk_pacing_shift) == sizeof(long)) || sizeof(sk->sk_pacing_shift) == sizeof(long long))) __compiletime_assert_434(); } while (0); (*(const volatile typeof( _Generic((sk->sk_pacing_shift), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_pacing_shift))) *)&(sk->sk_pacing_shift)); }) == val)
return;
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_435(void) ; if (!((sizeof(sk->sk_pacing_shift) == sizeof(char) || sizeof(sk->sk_pacing_shift) == sizeof(short) || sizeof(sk->sk_pacing_shift) == sizeof(int) || sizeof(sk->sk_pacing_shift) == sizeof(long)) || sizeof(sk->sk_pacing_shift) == sizeof(long long))) __compiletime_assert_435(); } while (0); do { *(volatile typeof(sk->sk_pacing_shift) *)&(sk->sk_pacing_shift) = (val); } while (0); } while (0);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_dev_equal_l3scope(struct sock *sk, int dif)
{
int mdif;

if (!sk->__sk_common.skc_bound_dev_if || sk->__sk_common.skc_bound_dev_if == dif)
return true;

mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
if (mdif && mdif == sk->__sk_common.skc_bound_dev_if)
return true;

return false;
}

void sock_def_readable(struct sock *sk);

int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
void sock_set_timestamp(struct sock *sk, int optname, bool valbool);
int sock_set_timestamping(struct sock *sk, int optname,
struct so_timestamping timestamping);

void sock_enable_timestamps(struct sock *sk);
void sock_no_linger(struct sock *sk);
void sock_set_keepalive(struct sock *sk);
void sock_set_priority(struct sock *sk, u32 priority);
void sock_set_rcvbuf(struct sock *sk, int val);
void sock_set_mark(struct sock *sk, u32 val);
void sock_set_reuseaddr(struct sock *sk);
void sock_set_reuseport(struct sock *sk);
void sock_set_sndtimeo(struct sock *sk, s64 secs);

int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);

int sock_get_timeout(long timeo, void *optval, bool old_timeval);
int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
sockptr_t optval, int optlen, bool old_timeval);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_is_readable(struct sock *sk)
{
if (sk->__sk_common.skc_prot->sock_is_readable)
return sk->__sk_common.skc_prot->sock_is_readable(sk);
return false;
}
# 9 "./include/linux/mroute_base.h" 2

# 1 "./include/net/ip_fib.h" 1
# 20 "./include/net/ip_fib.h"
# 1 "./include/net/inet_dscp.h" 1
# 38 "./include/net/inet_dscp.h"
typedef u8 dscp_t;



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) dscp_t inet_dsfield_to_dscp(__u8 dsfield)
{
return ( dscp_t)(dsfield & 0xfc);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u8 inet_dscp_to_dsfield(dscp_t dscp)
{
return ( __u8)dscp;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool inet_validate_dscp(__u8 val)
{
return !(val & ~0xfc);
}
# 21 "./include/net/ip_fib.h" 2
# 1 "./include/net/inetpeer.h" 1
# 16 "./include/net/inetpeer.h"
# 1 "./include/net/ipv6.h" 1
# 12 "./include/net/ipv6.h"
# 1 "./include/linux/ipv6.h" 1




# 1 "./include/uapi/linux/ipv6.h" 1
# 21 "./include/uapi/linux/ipv6.h"
struct in6_pktinfo {
struct in6_addr ipi6_addr;
int ipi6_ifindex;
};



struct ip6_mtuinfo {
struct sockaddr_in6 ip6m_addr;
__u32 ip6m_mtu;
};


struct in6_ifreq {
struct in6_addr ifr6_addr;
__u32 ifr6_prefixlen;
int ifr6_ifindex;
};
# 49 "./include/uapi/linux/ipv6.h"
struct ipv6_rt_hdr {
__u8 nexthdr;
__u8 hdrlen;
__u8 type;
__u8 segments_left;





};


struct ipv6_opt_hdr {
__u8 nexthdr;
__u8 hdrlen;



} __attribute__((packed));
# 80 "./include/uapi/linux/ipv6.h"
struct rt0_hdr {
struct ipv6_rt_hdr rt_hdr;
__u32 reserved;
struct in6_addr addr[0];


};





struct rt2_hdr {
struct ipv6_rt_hdr rt_hdr;
__u32 reserved;
struct in6_addr addr;


};





struct ipv6_destopt_hao {
__u8 type;
__u8 length;
struct in6_addr addr;
} __attribute__((packed));
# 117 "./include/uapi/linux/ipv6.h"
struct ipv6hdr {

__u8 priority:4,
version:4;






__u8 flow_lbl[3];

__be16 payload_len;
__u8 nexthdr;
__u8 hop_limit;

struct in6_addr saddr;
struct in6_addr daddr;
};



enum {
DEVCONF_FORWARDING = 0,
DEVCONF_HOPLIMIT,
DEVCONF_MTU6,
DEVCONF_ACCEPT_RA,
DEVCONF_ACCEPT_REDIRECTS,
DEVCONF_AUTOCONF,
DEVCONF_DAD_TRANSMITS,
DEVCONF_RTR_SOLICITS,
DEVCONF_RTR_SOLICIT_INTERVAL,
DEVCONF_RTR_SOLICIT_DELAY,
DEVCONF_USE_TEMPADDR,
DEVCONF_TEMP_VALID_LFT,
DEVCONF_TEMP_PREFERED_LFT,
DEVCONF_REGEN_MAX_RETRY,
DEVCONF_MAX_DESYNC_FACTOR,
DEVCONF_MAX_ADDRESSES,
DEVCONF_FORCE_MLD_VERSION,
DEVCONF_ACCEPT_RA_DEFRTR,
DEVCONF_ACCEPT_RA_PINFO,
DEVCONF_ACCEPT_RA_RTR_PREF,
DEVCONF_RTR_PROBE_INTERVAL,
DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN,
DEVCONF_PROXY_NDP,
DEVCONF_OPTIMISTIC_DAD,
DEVCONF_ACCEPT_SOURCE_ROUTE,
DEVCONF_MC_FORWARDING,
DEVCONF_DISABLE_IPV6,
DEVCONF_ACCEPT_DAD,
DEVCONF_FORCE_TLLAO,
DEVCONF_NDISC_NOTIFY,
DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL,
DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL,
DEVCONF_SUPPRESS_FRAG_NDISC,
DEVCONF_ACCEPT_RA_FROM_LOCAL,
DEVCONF_USE_OPTIMISTIC,
DEVCONF_ACCEPT_RA_MTU,
DEVCONF_STABLE_SECRET,
DEVCONF_USE_OIF_ADDRS_ONLY,
DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT,
DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
DEVCONF_DROP_UNSOLICITED_NA,
DEVCONF_KEEP_ADDR_ON_DOWN,
DEVCONF_RTR_SOLICIT_MAX_INTERVAL,
DEVCONF_SEG6_ENABLED,
DEVCONF_SEG6_REQUIRE_HMAC,
DEVCONF_ENHANCED_DAD,
DEVCONF_ADDR_GEN_MODE,
DEVCONF_DISABLE_POLICY,
DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN,
DEVCONF_NDISC_TCLASS,
DEVCONF_RPL_SEG_ENABLED,
DEVCONF_RA_DEFRTR_METRIC,
DEVCONF_IOAM6_ENABLED,
DEVCONF_IOAM6_ID,
DEVCONF_IOAM6_ID_WIDE,
DEVCONF_NDISC_EVICT_NOCARRIER,
DEVCONF_MAX
};
# 6 "./include/linux/ipv6.h" 2






struct ipv6_devconf {
__s32 forwarding;
__s32 hop_limit;
__s32 mtu6;
__s32 accept_ra;
__s32 accept_redirects;
__s32 autoconf;
__s32 dad_transmits;
__s32 rtr_solicits;
__s32 rtr_solicit_interval;
__s32 rtr_solicit_max_interval;
__s32 rtr_solicit_delay;
__s32 force_mld_version;
__s32 mldv1_unsolicited_report_interval;
__s32 mldv2_unsolicited_report_interval;
__s32 use_tempaddr;
__s32 temp_valid_lft;
__s32 temp_prefered_lft;
__s32 regen_max_retry;
__s32 max_desync_factor;
__s32 max_addresses;
__s32 accept_ra_defrtr;
__u32 ra_defrtr_metric;
__s32 accept_ra_min_hop_limit;
__s32 accept_ra_pinfo;
__s32 ignore_routes_with_linkdown;
# 46 "./include/linux/ipv6.h"
__s32 proxy_ndp;
__s32 accept_source_route;
__s32 accept_ra_from_local;







__s32 disable_ipv6;
__s32 drop_unicast_in_l2_multicast;
__s32 accept_dad;
__s32 force_tllao;
__s32 ndisc_notify;
__s32 suppress_frag_ndisc;
__s32 accept_ra_mtu;
__s32 drop_unsolicited_na;
struct ipv6_stable_secret {
bool initialized;
struct in6_addr secret;
} stable_secret;
__s32 use_oif_addrs_only;
__s32 keep_addr_on_down;
__s32 seg6_enabled;



__u32 enhanced_dad;
__u32 addr_gen_mode;
__s32 disable_policy;
__s32 ndisc_tclass;
__s32 rpl_seg_enabled;
__u32 ioam6_id;
__u32 ioam6_id_wide;
__u8 ioam6_enabled;
__u8 ndisc_evict_nocarrier;

struct ctl_table_header *sysctl_header;
};

struct ipv6_params {
__s32 disable_ipv6;
__s32 autoconf;
};
extern struct ipv6_params ipv6_defaults;

# 1 "./include/linux/tcp.h" 1
# 18 "./include/linux/tcp.h"
# 1 "./include/linux/win_minmax.h" 1
# 12 "./include/linux/win_minmax.h"
struct minmax_sample {
u32 t;
u32 v;
};


struct minmax {
struct minmax_sample s[3];
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 minmax_get(const struct minmax *m)
{
return m->s[0].v;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 minmax_reset(struct minmax *m, u32 t, u32 meas)
{
struct minmax_sample val = { .t = t, .v = meas };

m->s[2] = m->s[1] = m->s[0] = val;
return m->s[0].v;
}

u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas);
u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas);
# 19 "./include/linux/tcp.h" 2

# 1 "./include/net/inet_connection_sock.h" 1
# 21 "./include/net/inet_connection_sock.h"
# 1 "./include/net/inet_sock.h" 1
# 18 "./include/net/inet_sock.h"
# 1 "./include/linux/jhash.h" 1
# 27 "./include/linux/jhash.h"
# 1 "./include/linux/unaligned/packed_struct.h" 1





struct __una_u16 { u16 x; } __attribute__((__packed__));
struct __una_u32 { u32 x; } __attribute__((__packed__));
struct __una_u64 { u64 x; } __attribute__((__packed__));

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u16 __get_unaligned_cpu16(const void *p)
{
const struct __una_u16 *ptr = (const struct __una_u16 *)p;
return ptr->x;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 __get_unaligned_cpu32(const void *p)
{
const struct __una_u32 *ptr = (const struct __una_u32 *)p;
return ptr->x;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 __get_unaligned_cpu64(const void *p)
{
const struct __una_u64 *ptr = (const struct __una_u64 *)p;
return ptr->x;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __put_unaligned_cpu16(u16 val, void *p)
{
struct __una_u16 *ptr = (struct __una_u16 *)p;
ptr->x = val;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __put_unaligned_cpu32(u32 val, void *p)
{
struct __una_u32 *ptr = (struct __una_u32 *)p;
ptr->x = val;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __put_unaligned_cpu64(u64 val, void *p)
{
struct __una_u64 *ptr = (struct __una_u64 *)p;
ptr->x = val;
}
# 28 "./include/linux/jhash.h" 2
# 70 "./include/linux/jhash.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 jhash(const void *key, u32 length, u32 initval)
{
u32 a, b, c;
const u8 *k = key;


a = b = c = 0xdeadbeef + length + initval;


while (length > 12) {
a += __get_unaligned_cpu32(k);
b += __get_unaligned_cpu32(k + 4);
c += __get_unaligned_cpu32(k + 8);
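/* __jhash_mix(a, b, c), expanded (here and in jhash2() below). */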
{ a -= c; a ^= rol32(c, 4); c += b; b -= a; b ^= rol32(a, 6); a += c; c -= b; c ^= rol32(b, 8); b += a; a -= c; a ^= rol32(c, 16); c += b; b -= a; b ^= rol32(a, 19); a += c; c -= b; c ^= rol32(b, 4); b += a; };
length -= 12;
k += 12;
}

switch (length) {
case 12: c += (u32)k[11]<<24; __attribute__((__fallthrough__));
case 11: c += (u32)k[10]<<16; __attribute__((__fallthrough__));
case 10: c += (u32)k[9]<<8; __attribute__((__fallthrough__));
case 9: c += k[8]; __attribute__((__fallthrough__));
case 8: b += (u32)k[7]<<24; __attribute__((__fallthrough__));
case 7: b += (u32)k[6]<<16; __attribute__((__fallthrough__));
case 6: b += (u32)k[5]<<8; __attribute__((__fallthrough__));
case 5: b += k[4]; __attribute__((__fallthrough__));
case 4: a += (u32)k[3]<<24; __attribute__((__fallthrough__));
case 3: a += (u32)k[2]<<16; __attribute__((__fallthrough__));
case 2: a += (u32)k[1]<<8; __attribute__((__fallthrough__));
case 1: a += k[0];
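/* __jhash_final(a, b, c), expanded. */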
{ c ^= b; c -= rol32(b, 14); a ^= c; a -= rol32(c, 11); b ^= a; b -= rol32(a, 25); c ^= b; c -= rol32(b, 16); a ^= c; a -= rol32(c, 4); b ^= a; b -= rol32(a, 14); c ^= b; c -= rol32(b, 24); };
break;
case 0:
break;
}

return c;
}
# 117 "./include/linux/jhash.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 jhash2(const u32 *k, u32 length, u32 initval)
{
u32 a, b, c;


a = b = c = 0xdeadbeef + (length<<2) + initval;


while (length > 3) {
a += k[0];
b += k[1];
c += k[2];
{ a -= c; a ^= rol32(c, 4); c += b; b -= a; b ^= rol32(a, 6); a += c; c -= b; c ^= rol32(b, 8); b += a; a -= c; a ^= rol32(c, 16); c += b; b -= a; b ^= rol32(a, 19); a += c; c -= b; c ^= rol32(b, 4); b += a; };
length -= 3;
k += 3;
}


switch (length) {
case 3: c += k[2]; __attribute__((__fallthrough__));
case 2: b += k[1]; __attribute__((__fallthrough__));
case 1: a += k[0];
{ c ^= b; c -= rol32(b, 14); a ^= c; a -= rol32(c, 11); b ^= a; b -= rol32(a, 25); c ^= b; c -= rol32(b, 16); a ^= c; a -= rol32(c, 4); b ^= a; b -= rol32(a, 14); c ^= b; c -= rol32(b, 24); };
break;
case 0:
break;
}

return c;
}
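/*
 * __jhash_nwords() and the jhash_{3,2,1}words() wrappers hash up to
 * three u32 values directly, skipping the mixing loop above.
 */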



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
a += initval;
b += initval;
c += initval;

{ c ^= b; c -= rol32(b, 14); a ^= c; a -= rol32(c, 11); b ^= a; b -= rol32(a, 25); c ^= b; c -= rol32(b, 16); a ^= c; a -= rol32(c, 4); b ^= a; b -= rol32(a, 14); c ^= b; c -= rol32(b, 24); };

return c;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
{
return __jhash_nwords(a, b, c, initval + 0xdeadbeef + (3 << 2));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 jhash_2words(u32 a, u32 b, u32 initval)
{
return __jhash_nwords(a, b, 0, initval + 0xdeadbeef + (2 << 2));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 jhash_1word(u32 a, u32 initval)
{
return __jhash_nwords(a, 0, 0, initval + 0xdeadbeef + (1 << 2));
}
# 19 "./include/net/inet_sock.h" 2




# 1 "./include/net/request_sock.h" 1
# 22 "./include/net/request_sock.h"
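/*
 * Expanded from include/net/request_sock.h: a request_sock is the
 * lightweight mini-socket standing in for a not-yet-accepted connection
 * request (e.g. a received TCP SYN), with its per-family ops table.
 */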
struct request_sock;
struct sk_buff;
struct dst_entry;
struct proto;

struct request_sock_ops {
int family;
unsigned int obj_size;
struct kmem_cache *slab;
char *slab_name;
int (*rtx_syn_ack)(const struct sock *sk,
struct request_sock *req);
void (*send_ack)(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req);
void (*send_reset)(const struct sock *sk,
struct sk_buff *skb);
void (*destructor)(struct request_sock *req);
void (*syn_ack_timeout)(const struct request_sock *req);
};

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);

struct saved_syn {
u32 mac_hdrlen;
u32 network_hdrlen;
u32 tcp_hdrlen;
u8 data[];
};



struct request_sock {
struct sock_common __req_common;






struct request_sock *dl_next;
u16 mss;
u8 num_retrans;
u8 syncookie:1;
u8 num_timeout:7;
u32 ts_recent;
struct timer_list rsk_timer;
const struct request_sock_ops *rsk_ops;
struct sock *sk;
struct saved_syn *saved_syn;
u32 secid;
u32 peer_secid;
u32 timeout;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct request_sock *inet_reqsk(const struct sock *sk)
{
return (struct request_sock *)sk;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sock *req_to_sk(struct request_sock *req)
{
return (struct sock *)req;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
bool attach_listener)
{
struct request_sock *req;

req = kmem_cache_alloc(ops->slab, ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)) | (( gfp_t)0x2000u));
if (!req)
return ((void *)0);
req->__req_common.skc_listener = ((void *)0);
if (attach_listener) {
if (__builtin_expect(!!(!refcount_inc_not_zero(&sk_listener->__sk_common.skc_refcnt)), 0)) {
kmem_cache_free(ops->slab, req);
return ((void *)0);
}
req->__req_common.skc_listener = sk_listener;
}
req->rsk_ops = ops;
req_to_sk(req)->__sk_common.skc_prot = sk_listener->__sk_common.skc_prot;
sk_node_init(&req_to_sk(req)->__sk_common.skc_node);
sk_tx_queue_clear(req_to_sk(req));
req->saved_syn = ((void *)0);
req->timeout = 0;
req->num_timeout = 0;
req->num_retrans = 0;
req->sk = ((void *)0);
refcount_set(&req->__req_common.skc_refcnt, 0);

return req;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __reqsk_free(struct request_sock *req)
{
req->rsk_ops->destructor(req);
if (req->__req_common.skc_listener)
sock_put(req->__req_common.skc_listener);
kfree(req->saved_syn);
kmem_cache_free(req->rsk_ops->slab, req);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void reqsk_free(struct request_sock *req)
{
({ int __ret_warn_on = !!(refcount_read(&req->__req_common.skc_refcnt) != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/request_sock.h"), "i" (128), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
__reqsk_free(req);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void reqsk_put(struct request_sock *req)
{
if (refcount_dec_and_test(&req->__req_common.skc_refcnt))
reqsk_free(req);
}
# 155 "./include/net/request_sock.h"
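/*
 * Listener-side queues: fastopen_queue holds per-listener TCP Fast Open
 * state (including requests that were reset), and request_sock_queue is
 * the accept queue proper -- qlen/young counters plus the
 * rskq_accept_head/tail FIFO.
 */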
struct fastopen_queue {
struct request_sock *rskq_rst_head;
struct request_sock *rskq_rst_tail;



spinlock_t lock;
int qlen;
int max_qlen;

struct tcp_fastopen_context *ctx;
};
# 175 "./include/net/request_sock.h"
struct request_sock_queue {
spinlock_t rskq_lock;
u8 rskq_defer_accept;

u32 synflood_warned;
atomic_t qlen;
atomic_t young;

struct request_sock *rskq_accept_head;
struct request_sock *rskq_accept_tail;
struct fastopen_queue fastopenq;


};

void reqsk_queue_alloc(struct request_sock_queue *queue);

void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
bool reset);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool reqsk_queue_empty(const struct request_sock_queue *queue)
{
return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_436(void) ; if (!((sizeof(queue->rskq_accept_head) == sizeof(char) || sizeof(queue->rskq_accept_head) == sizeof(short) || sizeof(queue->rskq_accept_head) == sizeof(int) || sizeof(queue->rskq_accept_head) == sizeof(long)) || sizeof(queue->rskq_accept_head) == sizeof(long long))) __compiletime_assert_436(); } while (0); (*(const volatile typeof( _Generic((queue->rskq_accept_head), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (queue->rskq_accept_head))) *)&(queue->rskq_accept_head)); }) == ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
struct sock *parent)
{
struct request_sock *req;

spin_lock_bh(&queue->rskq_lock);
req = queue->rskq_accept_head;
if (req) {
sk_acceptq_removed(parent);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_437(void) ; if (!((sizeof(queue->rskq_accept_head) == sizeof(char) || sizeof(queue->rskq_accept_head) == sizeof(short) || sizeof(queue->rskq_accept_head) == sizeof(int) || sizeof(queue->rskq_accept_head) == sizeof(long)) || sizeof(queue->rskq_accept_head) == sizeof(long long))) __compiletime_assert_437(); } while (0); do { *(volatile typeof(queue->rskq_accept_head) *)&(queue->rskq_accept_head) = (req->dl_next); } while (0); } while (0);
if (queue->rskq_accept_head == ((void *)0))
queue->rskq_accept_tail = ((void *)0);
}
spin_unlock_bh(&queue->rskq_lock);
return req;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void reqsk_queue_removed(struct request_sock_queue *queue,
const struct request_sock *req)
{
if (req->num_timeout == 0)
atomic_dec(&queue->young);
atomic_dec(&queue->qlen);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void reqsk_queue_added(struct request_sock_queue *queue)
{
atomic_inc(&queue->young);
atomic_inc(&queue->qlen);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int reqsk_queue_len(const struct request_sock_queue *queue)
{
return atomic_read(&queue->qlen);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
return atomic_read(&queue->young);
}
# 24 "./include/net/inet_sock.h" 2
# 1 "./include/net/netns/hash.h" 1






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 net_hash_mix(const struct net *net)
{
return net->hash_mix;
}
# 25 "./include/net/inet_sock.h" 2
# 39 "./include/net/inet_sock.h"
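/*
 * Expanded from include/net/inet_sock.h: parsed IPv4 options, and
 * inet_request_sock, which extends request_sock with the options
 * negotiated in the SYN (window scale, SACK, ECN, ...).
 */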
struct ip_options {
__be32 faddr;
__be32 nexthop;
unsigned char optlen;
unsigned char srr;
unsigned char rr;
unsigned char ts;
unsigned char is_strictroute:1,
srr_is_hit:1,
is_changed:1,
rr_needaddr:1,
ts_needtime:1,
ts_needaddr:1;
unsigned char router_alert;
unsigned char cipso;
unsigned char __pad2;
unsigned char __data[];
};

struct ip_options_rcu {
struct callback_head rcu;
struct ip_options opt;
};

struct ip_options_data {
struct ip_options_rcu opt;
char data[40];
};

struct inet_request_sock {
struct request_sock req;
# 82 "./include/net/inet_sock.h"
u16 snd_wscale : 4,
rcv_wscale : 4,
tstamp_ok : 1,
sack_ok : 1,
wscale_ok : 1,
ecn_ok : 1,
acked : 1,
no_srccheck: 1,
smc_ok : 1;
u32 ir_mark;
union {
struct ip_options_rcu *ireq_opt;

struct {
struct ipv6_txoptions *ipv6_opt;
struct sk_buff *pktopts;
};

};
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct inet_request_sock *inet_rsk(const struct request_sock *sk)
{
return (struct inet_request_sock *)sk;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
{
if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)
return skb->mark;

return sk->sk_mark;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int inet_request_bound_dev_if(const struct sock *sk,
struct sk_buff *skb)
{







return sk->__sk_common.skc_bound_dev_if;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int inet_sk_bound_l3mdev(const struct sock *sk)
{
# 139 "./include/net/inet_sock.h"
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if,
int dif, int sdif)
{
if (!bound_dev_if)
return !sdif || l3mdev_accept;
return bound_dev_if == dif || bound_dev_if == sdif;
}

struct inet_cork {
unsigned int flags;
__be32 addr;
struct ip_options *opt;
unsigned int fragsize;
int length;
struct dst_entry *dst;
u8 tx_flags;
__u8 ttl;
__s16 tos;
char priority;
__u16 gso_size;
u64 transmit_time;
u32 mark;
};

struct inet_cork_full {
struct inet_cork base;
struct flowi fl;
};

struct ip_mc_socklist;
struct ipv6_pinfo;
struct rtable;
# 195 "./include/net/inet_sock.h"
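/*
 * struct inet_sock: AF_INET state layered over struct sock; pinet6
 * points at the IPv6 state for AF_INET6 sockets (present here because
 * CONFIG_IPV6 is enabled in this config).
 */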
struct inet_sock {

struct sock sk;

struct ipv6_pinfo *pinet6;







__be32 inet_saddr;
__s16 uc_ttl;
__u16 cmsg_flags;
struct ip_options_rcu *inet_opt;
__be16 inet_sport;
__u16 inet_id;

__u8 tos;
__u8 min_ttl;
__u8 mc_ttl;
__u8 pmtudisc;
__u8 recverr:1,
is_icsk:1,
freebind:1,
hdrincl:1,
mc_loop:1,
transparent:1,
mc_all:1,
nodefrag:1;
__u8 bind_address_no_port:1,
recverr_rfc4884:1,
defer_connect:1;



__u8 rcv_tos;
__u8 convert_csum;
int uc_index;
int mc_index;
__be32 mc_addr;
struct ip_mc_socklist *mc_list;
struct inet_cork_full cork;
};
# 262 "./include/net/inet_sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sock *sk_to_full_sk(struct sock *sk)
{

if (sk && sk->__sk_common.skc_state == TCP_NEW_SYN_RECV)
sk = inet_reqsk(sk)->__req_common.skc_listener;

return sk;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const struct sock *sk_const_to_full_sk(const struct sock *sk)
{

if (sk && sk->__sk_common.skc_state == TCP_NEW_SYN_RECV)
sk = ((const struct request_sock *)sk)->__req_common.skc_listener;

return sk;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sock *skb_to_full_sk(const struct sk_buff *skb)
{
return sk_to_full_sk(skb->sk);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct inet_sock *inet_sk(const struct sock *sk)
{
return (struct inet_sock *)sk;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __inet_sk_copy_descendant(struct sock *sk_to,
const struct sock *sk_from,
const int ancestor_size)
{
memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1,
sk_from->__sk_common.skc_prot->obj_size - ancestor_size);
}

int inet_sk_rebuild_header(struct sock *sk);
# 308 "./include/net/inet_sock.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int inet_sk_state_load(const struct sock *sk)
{

return ({ typeof(*&sk->__sk_common.skc_state) ___p1 = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_438(void) ; if (!((sizeof(*&sk->__sk_common.skc_state) == sizeof(char) || sizeof(*&sk->__sk_common.skc_state) == sizeof(short) || sizeof(*&sk->__sk_common.skc_state) == sizeof(int) || sizeof(*&sk->__sk_common.skc_state) == sizeof(long)) || sizeof(*&sk->__sk_common.skc_state) == sizeof(long long))) __compiletime_assert_438(); } while (0); (*(const volatile typeof( _Generic((*&sk->__sk_common.skc_state), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*&sk->__sk_common.skc_state))) *)&(*&sk->__sk_common.skc_state)); }); do { __attribute__((__noreturn__)) extern void __compiletime_assert_439(void) ; if (!((sizeof(*&sk->__sk_common.skc_state) == sizeof(char) || sizeof(*&sk->__sk_common.skc_state) == sizeof(short) || sizeof(*&sk->__sk_common.skc_state) == sizeof(int) || sizeof(*&sk->__sk_common.skc_state) == sizeof(long)))) __compiletime_assert_439(); } while (0); __asm__ __volatile__ ("fence " "r" "," "rw" : : : "memory"); ___p1; });
}
# 322 "./include/net/inet_sock.h"
void inet_sk_state_store(struct sock *sk, int newstate);

void inet_sk_set_state(struct sock *sk, int state);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int __inet_ehashfn(const __be32 laddr,
const __u16 lport,
const __be32 faddr,
const __be16 fport,
u32 initval)
{
return jhash_3words(( __u32) laddr,
( __u32) faddr,
((__u32) lport) << 16 | ( __u32)fport,
initval);
}

struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
struct sock *sk_listener,
bool attach_listener);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u8 inet_sk_flowi_flags(const struct sock *sk)
{
__u8 flags = 0;

if (inet_sk(sk)->transparent || inet_sk(sk)->hdrincl)
flags |= 0x01;
return flags;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inet_inc_convert_csum(struct sock *sk)
{
inet_sk(sk)->convert_csum++;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inet_dec_convert_csum(struct sock *sk)
{
if (inet_sk(sk)->convert_csum > 0)
inet_sk(sk)->convert_csum--;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool inet_get_convert_csum(struct sock *sk)
{
return !!inet_sk(sk)->convert_csum;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool inet_can_nonlocal_bind(struct net *net,
struct inet_sock *inet)
{
return net->ipv4.sysctl_ip_nonlocal_bind ||
inet->freebind || inet->transparent;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool inet_addr_valid_or_nonlocal(struct net *net,
struct inet_sock *inet,
__be32 addr,
int addr_type)
{
return inet_can_nonlocal_bind(net, inet) ||
addr == (( __be32)(__builtin_constant_p((__u32)((((unsigned long int) 0x00000000)))) ? ((__u32)( (((__u32)((((unsigned long int) 0x00000000))) & (__u32)0x000000ffUL) << 24) | (((__u32)((((unsigned long int) 0x00000000))) & (__u32)0x0000ff00UL) << 8) | (((__u32)((((unsigned long int) 0x00000000))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((((unsigned long int) 0x00000000))) & (__u32)0xff000000UL) >> 24))) : __fswab32((((unsigned long int) 0x00000000))))) ||
addr_type == RTN_LOCAL ||
addr_type == RTN_MULTICAST ||
addr_type == RTN_BROADCAST;
}
# 22 "./include/net/inet_connection_sock.h" 2





struct inet_bind_bucket;
struct tcp_congestion_ops;





struct inet_connection_sock_af_ops {
int (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
void (*send_check)(struct sock *sk, struct sk_buff *skb);
int (*rebuild_header)(struct sock *sk);
void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
int (*conn_request)(struct sock *sk, struct sk_buff *skb);
struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst,
struct request_sock *req_unhash,
bool *own_req);
u16 net_header_len;
u16 net_frag_header_len;
u16 sockaddr_len;
int (*setsockopt)(struct sock *sk, int level, int optname,
sockptr_t optval, unsigned int optlen);
int (*getsockopt)(struct sock *sk, int level, int optname,
char *optval, int *optlen);
void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
void (*mtu_reduced)(struct sock *sk);
};
# 82 "./include/net/inet_connection_sock.h"
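/*
 * struct inet_connection_sock: connection-oriented (TCP-style) state on
 * top of inet_sock -- accept queue, retransmit/delayed-ACK timers,
 * congestion-control ops, delayed-ACK bookkeeping and MTU probing.
 */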
struct inet_connection_sock {

struct inet_sock icsk_inet;
struct request_sock_queue icsk_accept_queue;
struct inet_bind_bucket *icsk_bind_hash;
unsigned long icsk_timeout;
struct timer_list icsk_retransmit_timer;
struct timer_list icsk_delack_timer;
__u32 icsk_rto;
__u32 icsk_rto_min;
__u32 icsk_delack_max;
__u32 icsk_pmtu_cookie;
const struct tcp_congestion_ops *icsk_ca_ops;
const struct inet_connection_sock_af_ops *icsk_af_ops;
const struct tcp_ulp_ops *icsk_ulp_ops;
void *icsk_ulp_data;
void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
struct hlist_node icsk_listen_portaddr_node;
unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
__u8 icsk_ca_state:5,
icsk_ca_initialized:1,
icsk_ca_setsockopt:1,
icsk_ca_dst_locked:1;
__u8 icsk_retransmits;
__u8 icsk_pending;
__u8 icsk_backoff;
__u8 icsk_syn_retries;
__u8 icsk_probes_out;
__u16 icsk_ext_hdr_len;
struct {
__u8 pending;
__u8 quick;
__u8 pingpong;
__u8 retry;
__u32 ato;
unsigned long timeout;
__u32 lrcvtime;
__u16 last_seg_size;
__u16 rcv_mss;
} icsk_ack;
struct {

int search_high;
int search_low;


u32 probe_size:31,

enabled:1;

u32 probe_timestamp;
} icsk_mtup;
u32 icsk_probes_tstamp;
u32 icsk_user_timeout;

u64 icsk_ca_priv[104 / sizeof(u64)];

};







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct inet_connection_sock *inet_csk(const struct sock *sk)
{
return (struct inet_connection_sock *)sk;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *inet_csk_ca(const struct sock *sk)
{
return (void *)inet_csk(sk)->icsk_ca_priv;
}

struct sock *inet_csk_clone_lock(const struct sock *sk,
const struct request_sock *req,
const gfp_t priority);

enum inet_csk_ack_state_t {
ICSK_ACK_SCHED = 1,
ICSK_ACK_TIMER = 2,
ICSK_ACK_PUSHED = 4,
ICSK_ACK_PUSHED2 = 8,
ICSK_ACK_NOW = 16
};

void inet_csk_init_xmit_timers(struct sock *sk,
void (*retransmit_handler)(struct timer_list *),
void (*delack_handler)(struct timer_list *),
void (*keepalive_handler)(struct timer_list *));
void inet_csk_clear_xmit_timers(struct sock *sk);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inet_csk_schedule_ack(struct sock *sk)
{
inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int inet_csk_ack_scheduled(const struct sock *sk)
{
return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inet_csk_delack_init(struct sock *sk)
{
memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
}

void inet_csk_delete_keepalive_timer(struct sock *sk);
void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
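/*
 * In the two timer helpers below, the bare constants are the
 * ICSK_TIME_* event codes, already folded to their values by the
 * preprocessor: 1 = ICSK_TIME_RETRANS, 2 = ICSK_TIME_DACK,
 * 3 = ICSK_TIME_PROBE0, 5 = ICSK_TIME_LOSS_PROBE,
 * 6 = ICSK_TIME_REO_TIMEOUT.
 */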

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
{
struct inet_connection_sock *icsk = inet_csk(sk);

if (what == 1 || what == 3) {
icsk->icsk_pending = 0;



} else if (what == 2) {
icsk->icsk_ack.pending = 0;
icsk->icsk_ack.retry = 0;



} else {
({ if (0) ({ do {} while (0); _printk("\001" "7" "IPv6: " "inet_csk BUG: unknown timer value\n"); }); 0; });
}
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
unsigned long when,
const unsigned long max_when)
{
struct inet_connection_sock *icsk = inet_csk(sk);

if (when > max_when) {
({ if (0) ({ do {} while (0); _printk("\001" "7" "IPv6: " "reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, (void *)({ __label__ __here; __here: (unsigned long)&&__here; })); }); 0; });

when = max_when;
}

if (what == 1 || what == 3 ||
what == 5 || what == 6) {
icsk->icsk_pending = what;
icsk->icsk_timeout = jiffies + when;
sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
} else if (what == 2) {
icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
icsk->icsk_ack.timeout = jiffies + when;
sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
} else {
({ if (0) ({ do {} while (0); _printk("\001" "7" "IPv6: " "inet_csk BUG: unknown timer value\n"); }); 0; });
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long
inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
unsigned long max_when)
{
u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;

return (unsigned long)__builtin_choose_expr(((!!(sizeof((typeof((u64)(when)) *)1 == (typeof((u64)(max_when)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((u64)(when)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((u64)(max_when)) * 0l)) : (int *)8))))), (((u64)(when)) < ((u64)(max_when)) ? ((u64)(when)) : ((u64)(max_when))), ({ typeof((u64)(when)) __UNIQUE_ID___x440 = ((u64)(when)); typeof((u64)(max_when)) __UNIQUE_ID___y441 = ((u64)(max_when)); ((__UNIQUE_ID___x440) < (__UNIQUE_ID___y441) ? (__UNIQUE_ID___x440) : (__UNIQUE_ID___y441)); }));
}

struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern);

int inet_csk_get_port(struct sock *sk, unsigned short snum);

struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
const struct request_sock *req);
struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
struct sock *newsk,
const struct request_sock *req);

struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
struct request_sock *req,
struct sock *child);
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
unsigned long timeout);
struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
struct request_sock *req,
bool own_req);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inet_csk_reqsk_queue_added(struct sock *sk)
{
reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int inet_csk_reqsk_queue_len(const struct sock *sk)
{
return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int inet_csk_reqsk_queue_is_full(const struct sock *sk)
{
return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
}

bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long
reqsk_timeout(struct request_sock *req, unsigned long max_timeout)
{
u64 timeout = (u64)req->timeout << req->num_timeout;

return (unsigned long)__builtin_choose_expr(((!!(sizeof((typeof((u64)(timeout)) *)1 == (typeof((u64)(max_timeout)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((u64)(timeout)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((u64)(max_timeout)) * 0l)) : (int *)8))))), (((u64)(timeout)) < ((u64)(max_timeout)) ? ((u64)(timeout)) : ((u64)(max_timeout))), ({ typeof((u64)(timeout)) __UNIQUE_ID___x442 = ((u64)(timeout)); typeof((u64)(max_timeout)) __UNIQUE_ID___y443 = ((u64)(max_timeout)); ((__UNIQUE_ID___x442) < (__UNIQUE_ID___y443) ? (__UNIQUE_ID___x442) : (__UNIQUE_ID___y443)); }));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inet_csk_prepare_for_destroy_sock(struct sock *sk)
{

sock_set_flag(sk, SOCK_DEAD);
do { do { const void *__vpp_verify = (typeof((&(*sk->__sk_common.skc_prot->orphan_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(*sk->__sk_common.skc_prot->orphan_count)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sk->__sk_common.skc_prot->orphan_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sk->__sk_common.skc_prot->orphan_count))) *)(&(*sk->__sk_common.skc_prot->orphan_count))); (typeof((typeof(*(&(*sk->__sk_common.skc_prot->orphan_count))) *)(&(*sk->__sk_common.skc_prot->orphan_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sk->__sk_common.skc_prot->orphan_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sk->__sk_common.skc_prot->orphan_count))) *)(&(*sk->__sk_common.skc_prot->orphan_count))); (typeof((typeof(*(&(*sk->__sk_common.skc_prot->orphan_count))) *)(&(*sk->__sk_common.skc_prot->orphan_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sk->__sk_common.skc_prot->orphan_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sk->__sk_common.skc_prot->orphan_count))) *)(&(*sk->__sk_common.skc_prot->orphan_count))); (typeof((typeof(*(&(*sk->__sk_common.skc_prot->orphan_count))) *)(&(*sk->__sk_common.skc_prot->orphan_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(*sk->__sk_common.skc_prot->orphan_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(*sk->__sk_common.skc_prot->orphan_count))) *)(&(*sk->__sk_common.skc_prot->orphan_count))); (typeof((typeof(*(&(*sk->__sk_common.skc_prot->orphan_count))) *)(&(*sk->__sk_common.skc_prot->orphan_count)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) 
__dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}

void inet_csk_destroy_sock(struct sock *sk);
void inet_csk_prepare_forced_close(struct sock *sk);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __poll_t inet_csk_listen_poll(const struct sock *sk)
{
return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?
(( __poll_t)0x00000001 | ( __poll_t)0x00000040) : 0;
}

int inet_csk_listen_start(struct sock *sk);
void inet_csk_listen_stop(struct sock *sk);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);


void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
struct sock *sk);

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inet_csk_enter_pingpong_mode(struct sock *sk)
{
inet_csk(sk)->icsk_ack.pingpong = 3;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inet_csk_exit_pingpong_mode(struct sock *sk)
{
inet_csk(sk)->icsk_ack.pingpong = 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool inet_csk_in_pingpong_mode(struct sock *sk)
{
return inet_csk(sk)->icsk_ack.pingpong >= 3;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inet_csk_inc_pingpong_cnt(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);

if (icsk->icsk_ack.pingpong < ((u8)~0U))
icsk->icsk_ack.pingpong++;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool inet_csk_has_ulp(struct sock *sk)
{
return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops;
}
# 21 "./include/linux/tcp.h" 2
# 1 "./include/net/inet_timewait_sock.h" 1
# 22 "./include/net/inet_timewait_sock.h"
# 1 "./include/net/timewait_sock.h" 1
# 14 "./include/net/timewait_sock.h"
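/*
 * timewait_sock_ops and inet_timewait_sock (include/net/timewait_sock.h
 * and include/net/inet_timewait_sock.h): the shrunken socket that
 * stands in for a connection during TIME_WAIT.
 */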
struct timewait_sock_ops {
struct kmem_cache *twsk_slab;
char *twsk_slab_name;
unsigned int twsk_obj_size;
int (*twsk_unique)(struct sock *sk,
struct sock *sktw, void *twp);
void (*twsk_destructor)(struct sock *sk);
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
if (sk->__sk_common.skc_prot->twsk_prot->twsk_unique != ((void *)0))
return sk->__sk_common.skc_prot->twsk_prot->twsk_unique(sk, sktw, twp);
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void twsk_destructor(struct sock *sk)
{
if (sk->__sk_common.skc_prot->twsk_prot->twsk_destructor != ((void *)0))
sk->__sk_common.skc_prot->twsk_prot->twsk_destructor(sk);
}
# 23 "./include/net/inet_timewait_sock.h" 2



struct inet_bind_bucket;






struct inet_timewait_sock {




struct sock_common __tw_common;
# 60 "./include/net/inet_timewait_sock.h"
__u32 tw_mark;
volatile unsigned char tw_substate;
unsigned char tw_rcv_wscale;



__be16 tw_sport;

unsigned int tw_transparent : 1,
tw_flowlabel : 20,
tw_pad : 3,
tw_tos : 8;
u32 tw_txhash;
u32 tw_priority;
u32 tw_bslot;
struct timer_list tw_timer;
struct inet_bind_bucket *tw_tb;
};


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct inet_timewait_sock *inet_twsk(const struct sock *sk)
{
return (struct inet_timewait_sock *)sk;
}

void inet_twsk_free(struct inet_timewait_sock *tw);
void inet_twsk_put(struct inet_timewait_sock *tw);

void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
struct inet_hashinfo *hashinfo);

struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
struct inet_timewait_death_row *dr,
const int state);

void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
struct inet_hashinfo *hashinfo);

void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
bool rearm);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
{
__inet_twsk_schedule(tw, timeo, false);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
{
__inet_twsk_schedule(tw, timeo, true);
}

void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct net *twsk_net(const struct inet_timewait_sock *twsk)
{
return read_pnet(&twsk->__tw_common.skc_net);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
void twsk_net_set(struct inet_timewait_sock *twsk, struct net *net)
{
write_pnet(&twsk->__tw_common.skc_net, net);
}
# 22 "./include/linux/tcp.h" 2
# 1 "./include/uapi/linux/tcp.h" 1
# 25 "./include/uapi/linux/tcp.h"
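/*
 * UAPI TCP header. The flag bitfield below is the little-endian layout
 * (riscv64 is little-endian; the preprocessor dropped the big-endian
 * branch, hence the linemarker jump to line 55). The TCP_FLAG_*
 * enumerators are constant-folded htonl() expressions.
 */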
struct tcphdr {
__be16 source;
__be16 dest;
__be32 seq;
__be32 ack_seq;

__u16 res1:4,
doff:4,
fin:1,
syn:1,
rst:1,
psh:1,
ack:1,
urg:1,
ece:1,
cwr:1;
# 55 "./include/uapi/linux/tcp.h"
__be16 window;
__sum16 check;
__be16 urg_ptr;
};






union tcp_word_hdr {
struct tcphdr hdr;
__be32 words[5];
};



enum {
TCP_FLAG_CWR = (( __be32)((__u32)( (((__u32)((0x00800000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x00800000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x00800000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x00800000)) & (__u32)0xff000000UL) >> 24)))),
TCP_FLAG_ECE = (( __be32)((__u32)( (((__u32)((0x00400000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x00400000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x00400000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x00400000)) & (__u32)0xff000000UL) >> 24)))),
TCP_FLAG_URG = (( __be32)((__u32)( (((__u32)((0x00200000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x00200000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x00200000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x00200000)) & (__u32)0xff000000UL) >> 24)))),
TCP_FLAG_ACK = (( __be32)((__u32)( (((__u32)((0x00100000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x00100000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x00100000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x00100000)) & (__u32)0xff000000UL) >> 24)))),
TCP_FLAG_PSH = (( __be32)((__u32)( (((__u32)((0x00080000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x00080000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x00080000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x00080000)) & (__u32)0xff000000UL) >> 24)))),
TCP_FLAG_RST = (( __be32)((__u32)( (((__u32)((0x00040000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x00040000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x00040000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x00040000)) & (__u32)0xff000000UL) >> 24)))),
TCP_FLAG_SYN = (( __be32)((__u32)( (((__u32)((0x00020000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x00020000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x00020000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x00020000)) & (__u32)0xff000000UL) >> 24)))),
TCP_FLAG_FIN = (( __be32)((__u32)( (((__u32)((0x00010000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x00010000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x00010000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x00010000)) & (__u32)0xff000000UL) >> 24)))),
TCP_RESERVED_BITS = (( __be32)((__u32)( (((__u32)((0x0F000000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x0F000000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x0F000000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x0F000000)) & (__u32)0xff000000UL) >> 24)))),
TCP_DATA_OFFSET = (( __be32)((__u32)( (((__u32)((0xF0000000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xF0000000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xF0000000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xF0000000)) & (__u32)0xff000000UL) >> 24))))
};
# 137 "./include/uapi/linux/tcp.h"
struct tcp_repair_opt {
__u32 opt_code;
__u32 opt_val;
};

struct tcp_repair_window {
__u32 snd_wl1;
__u32 snd_wnd;
__u32 max_window;

__u32 rcv_wnd;
__u32 rcv_wup;
};

enum {
TCP_NO_QUEUE,
TCP_RECV_QUEUE,
TCP_SEND_QUEUE,
TCP_QUEUES_NR,
};


enum tcp_fastopen_client_fail {
TFO_STATUS_UNSPEC,
TFO_COOKIE_UNAVAILABLE,
TFO_DATA_NOT_ACKED,
TFO_SYN_RETRANSMITTED,
};
# 179 "./include/uapi/linux/tcp.h"
enum tcp_ca_state {




TCP_CA_Open = 0,







TCP_CA_Disorder = 1,






TCP_CA_CWR = 2,





TCP_CA_Recovery = 3,




TCP_CA_Loss = 4

};

struct tcp_info {
__u8 tcpi_state;
__u8 tcpi_ca_state;
__u8 tcpi_retransmits;
__u8 tcpi_probes;
__u8 tcpi_backoff;
__u8 tcpi_options;
__u8 tcpi_snd_wscale : 4, tcpi_rcv_wscale : 4;
__u8 tcpi_delivery_rate_app_limited:1, tcpi_fastopen_client_fail:2;

__u32 tcpi_rto;
__u32 tcpi_ato;
__u32 tcpi_snd_mss;
__u32 tcpi_rcv_mss;

__u32 tcpi_unacked;
__u32 tcpi_sacked;
__u32 tcpi_lost;
__u32 tcpi_retrans;
__u32 tcpi_fackets;


__u32 tcpi_last_data_sent;
__u32 tcpi_last_ack_sent;
__u32 tcpi_last_data_recv;
__u32 tcpi_last_ack_recv;


__u32 tcpi_pmtu;
__u32 tcpi_rcv_ssthresh;
__u32 tcpi_rtt;
__u32 tcpi_rttvar;
__u32 tcpi_snd_ssthresh;
__u32 tcpi_snd_cwnd;
__u32 tcpi_advmss;
__u32 tcpi_reordering;

__u32 tcpi_rcv_rtt;
__u32 tcpi_rcv_space;

__u32 tcpi_total_retrans;

__u64 tcpi_pacing_rate;
__u64 tcpi_max_pacing_rate;
__u64 tcpi_bytes_acked;
__u64 tcpi_bytes_received;
__u32 tcpi_segs_out;
__u32 tcpi_segs_in;

__u32 tcpi_notsent_bytes;
__u32 tcpi_min_rtt;
__u32 tcpi_data_segs_in;
__u32 tcpi_data_segs_out;

__u64 tcpi_delivery_rate;

__u64 tcpi_busy_time;
__u64 tcpi_rwnd_limited;
__u64 tcpi_sndbuf_limited;

__u32 tcpi_delivered;
__u32 tcpi_delivered_ce;

__u64 tcpi_bytes_sent;
__u64 tcpi_bytes_retrans;
__u32 tcpi_dsack_dups;
__u32 tcpi_reord_seen;

__u32 tcpi_rcv_ooopack;

__u32 tcpi_snd_wnd;


};


enum {
TCP_NLA_PAD,
TCP_NLA_BUSY,
TCP_NLA_RWND_LIMITED,
TCP_NLA_SNDBUF_LIMITED,
TCP_NLA_DATA_SEGS_OUT,
TCP_NLA_TOTAL_RETRANS,
TCP_NLA_PACING_RATE,
TCP_NLA_DELIVERY_RATE,
TCP_NLA_SND_CWND,
TCP_NLA_REORDERING,
TCP_NLA_MIN_RTT,
TCP_NLA_RECUR_RETRANS,
TCP_NLA_DELIVERY_RATE_APP_LMT,
TCP_NLA_SNDQ_SIZE,
TCP_NLA_CA_STATE,
TCP_NLA_SND_SSTHRESH,
TCP_NLA_DELIVERED,
TCP_NLA_DELIVERED_CE,
TCP_NLA_BYTES_SENT,
TCP_NLA_BYTES_RETRANS,
TCP_NLA_DSACK_DUPS,
TCP_NLA_REORD_SEEN,
TCP_NLA_SRTT,
TCP_NLA_TIMEOUT_REHASH,
TCP_NLA_BYTES_NOTSENT,
TCP_NLA_EDT,
TCP_NLA_TTL,
};
# 327 "./include/uapi/linux/tcp.h"
struct tcp_md5sig {
struct __kernel_sockaddr_storage tcpm_addr;
__u8 tcpm_flags;
__u8 tcpm_prefixlen;
__u16 tcpm_keylen;
int tcpm_ifindex;
__u8 tcpm_key[80];
};


struct tcp_diag_md5sig {
__u8 tcpm_family;
__u8 tcpm_prefixlen;
__u16 tcpm_keylen;
__be32 tcpm_addr[4];
__u8 tcpm_key[80];
};




struct tcp_zerocopy_receive {
__u64 address;
__u32 length;
__u32 recv_skip_hint;
__u32 inq;
__s32 err;
__u64 copybuf_address;
__s32 copybuf_len;
__u32 flags;
__u64 msg_control;
__u64 msg_controllen;
__u32 msg_flags;
__u32 reserved;
};
# 23 "./include/linux/tcp.h" 2

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct tcphdr *tcp_hdr(const struct sk_buff *skb)
{
return (struct tcphdr *)skb_transport_header(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int __tcp_hdrlen(const struct tcphdr *th)
{
return th->doff * 4;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int tcp_hdrlen(const struct sk_buff *skb)
{
return __tcp_hdrlen(tcp_hdr(skb));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
{
return (struct tcphdr *)skb_inner_transport_header(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int inner_tcp_hdrlen(const struct sk_buff *skb)
{
return inner_tcp_hdr(skb)->doff * 4;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int tcp_optlen(const struct sk_buff *skb)
{
return (tcp_hdr(skb)->doff - 5) * 4;
}







struct tcp_fastopen_cookie {
__le64 val[(((16) + (sizeof(u64)) - 1) / (sizeof(u64)))];
s8 len;
bool exp;
};


struct tcp_sack_block_wire {
__be32 start_seq;
__be32 end_seq;
};

struct tcp_sack_block {
u32 start_seq;
u32 end_seq;
};
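/*
 * tcp_options_received: TCP options parsed from the most recent segment
 * (timestamps, SACK, window scale), plus the user-set and clamped MSS.
 */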





struct tcp_options_received {

int ts_recent_stamp;
u32 ts_recent;
u32 rcv_tsval;
u32 rcv_tsecr;
u16 saw_tstamp : 1,
tstamp_ok : 1,
dsack : 1,
wscale_ok : 1,
sack_ok : 3,
smc_ok : 1,
snd_wscale : 4,
rcv_wscale : 4;
u8 saw_unknown:1,
unused:7;
u8 num_sacks;
u16 user_mss;
u16 mss_clamp;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_clear_options(struct tcp_options_received *rx_opt)
{
rx_opt->tstamp_ok = rx_opt->sack_ok = 0;
rx_opt->wscale_ok = rx_opt->snd_wscale = 0;



}







struct tcp_request_sock_ops;

struct tcp_request_sock {
struct inet_request_sock req;
const struct tcp_request_sock_ops *af_specific;
u64 snt_synack;
bool tfo_listener;
bool is_mptcp;



u32 txhash;
u32 rcv_isn;
u32 snt_isn;
u32 ts_off;
u32 last_oow_ack_time;
u32 rcv_nxt;



u8 syn_tos;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
{
return (struct tcp_request_sock *)req;
}
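/*
 * struct tcp_sock: the full TCP connection state -- sequence numbers,
 * RTT estimators, congestion-control variables, rate sampling, SACK
 * bookkeeping and the out-of-order queue.
 */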

struct tcp_sock {

struct inet_connection_sock inet_conn;
u16 tcp_header_len;
u16 gso_segs;





__be32 pred_flags;






u64 bytes_received;



u32 segs_in;


u32 data_segs_in;


u32 rcv_nxt;
u32 copied_seq;
u32 rcv_wup;
u32 snd_nxt;
u32 segs_out;


u32 data_segs_out;


u64 bytes_sent;


u64 bytes_acked;



u32 dsack_dups;


u32 snd_una;
u32 snd_sml;
u32 rcv_tstamp;
u32 lsndtime;
u32 last_oow_ack_time;
u32 compressed_ack_rcv_nxt;

u32 tsoffset;

struct list_head tsq_node;
struct list_head tsorted_sent_queue;

u32 snd_wl1;
u32 snd_wnd;
u32 max_window;
u32 mss_cache;

u32 window_clamp;
u32 rcv_ssthresh;


struct tcp_rack {
u64 mstamp;
u32 rtt_us;
u32 end_seq;
u32 last_delivered;
u8 reo_wnd_steps;

u8 reo_wnd_persist:5,
dsack_seen:1,
advanced:1;
} rack;
u16 advmss;
u8 compressed_ack;
u8 dup_ack_counter:2,
tlp_retrans:1,
unused:5;
u32 chrono_start;
u32 chrono_stat[3];
u8 chrono_type:2,
rate_app_limited:1,
fastopen_connect:1,
fastopen_no_cookie:1,
is_sack_reneg:1,
fastopen_client_fail:2;
u8 nonagle : 4,
thin_lto : 1,
recvmsg_inq : 1,
repair : 1,
frto : 1;
u8 repair_queue;
u8 save_syn:2,
syn_data:1,
syn_fastopen:1,
syn_fastopen_exp:1,
syn_fastopen_ch:1,
syn_data_acked:1,
is_cwnd_limited:1;
u32 tlp_high_seq;

u32 tcp_tx_delay;
u64 tcp_wstamp_ns;
u64 tcp_clock_cache;


u64 tcp_mstamp;
u32 srtt_us;
u32 mdev_us;
u32 mdev_max_us;
u32 rttvar_us;
u32 rtt_seq;
struct minmax rtt_min;

u32 packets_out;
u32 retrans_out;
u32 max_packets_out;
u32 max_packets_seq;

u16 urg_data;
u8 ecn_flags;
u8 keepalive_probes;
u32 reordering;
u32 reord_seen;
u32 snd_up;




struct tcp_options_received rx_opt;




u32 snd_ssthresh;
u32 snd_cwnd;
u32 snd_cwnd_cnt;
u32 snd_cwnd_clamp;
u32 snd_cwnd_used;
u32 snd_cwnd_stamp;
u32 prior_cwnd;
u32 prr_delivered;

u32 prr_out;
u32 delivered;
u32 delivered_ce;
u32 lost;
u32 app_limited;
u64 first_tx_mstamp;
u64 delivered_mstamp;
u32 rate_delivered;
u32 rate_interval_us;

u32 rcv_wnd;
u32 write_seq;
u32 notsent_lowat;
u32 pushed_seq;
u32 lost_out;
u32 sacked_out;

struct hrtimer pacing_timer;
struct hrtimer compressed_ack_timer;


struct sk_buff* lost_skb_hint;
struct sk_buff *retransmit_skb_hint;


struct rb_root out_of_order_queue;
struct sk_buff *ooo_last_skb;


struct tcp_sack_block duplicate_sack[1];
struct tcp_sack_block selective_acks[4];

struct tcp_sack_block recv_sack_cache[4];

struct sk_buff *highest_sack;





int lost_cnt_hint;

u32 prior_ssthresh;
u32 high_seq;

u32 retrans_stamp;


u32 undo_marker;
int undo_retrans;
u64 bytes_retrans;


u32 total_retrans;

u32 urg_seq;
unsigned int keepalive_time;
unsigned int keepalive_intvl;

int linger2;




u8 bpf_sock_ops_cb_flags;







u16 timeout_rehash;

u32 rcv_ooopack;


u32 rcv_rtt_last_tsecr;
struct {
u32 rtt_us;
u32 seq;
u64 time;
} rcv_rtt_est;


struct {
u32 space;
u32 seq;
u64 time;
} rcvq_space;


struct {
u32 probe_seq_start;
u32 probe_seq_end;
} mtu_probe;
u32 mtu_info;
# 410 "./include/linux/tcp.h"
struct tcp_fastopen_request *fastopen_req;



struct request_sock *fastopen_rsk;
struct saved_syn *saved_syn;
};

enum tsq_enum {
TSQ_THROTTLED,
TSQ_QUEUED,
TCP_TSQ_DEFERRED,
TCP_WRITE_TIMER_DEFERRED,
TCP_DELACK_TIMER_DEFERRED,
TCP_MTU_REDUCED_DEFERRED,


};

enum tsq_flags {
TSQF_THROTTLED = (1UL << TSQ_THROTTLED),
TSQF_QUEUED = (1UL << TSQ_QUEUED),
TCPF_TSQ_DEFERRED = (1UL << TCP_TSQ_DEFERRED),
TCPF_WRITE_TIMER_DEFERRED = (1UL << TCP_WRITE_TIMER_DEFERRED),
TCPF_DELACK_TIMER_DEFERRED = (1UL << TCP_DELACK_TIMER_DEFERRED),
TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED),
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct tcp_sock *tcp_sk(const struct sock *sk)
{
return (struct tcp_sock *)sk;
}

struct tcp_timewait_sock {
struct inet_timewait_sock tw_sk;


u32 tw_rcv_wnd;
u32 tw_ts_offset;
u32 tw_ts_recent;


u32 tw_last_oow_ack_time;

int tw_ts_recent_stamp;
u32 tw_tx_delay;



};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
{
return (struct tcp_timewait_sock *)sk;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_passive_fastopen(const struct sock *sk)
{
return sk->__sk_common.skc_state == TCP_SYN_RECV &&
({ typeof(*(tcp_sk(sk)->fastopen_rsk)) *__UNIQUE_ID_rcu444 = (typeof(*(tcp_sk(sk)->fastopen_rsk)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_445(void) ; if (!((sizeof((tcp_sk(sk)->fastopen_rsk)) == sizeof(char) || sizeof((tcp_sk(sk)->fastopen_rsk)) == sizeof(short) || sizeof((tcp_sk(sk)->fastopen_rsk)) == sizeof(int) || sizeof((tcp_sk(sk)->fastopen_rsk)) == sizeof(long)) || sizeof((tcp_sk(sk)->fastopen_rsk)) == sizeof(long long))) __compiletime_assert_445(); } while (0); (*(const volatile typeof( _Generic(((tcp_sk(sk)->fastopen_rsk)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((tcp_sk(sk)->fastopen_rsk)))) *)&((tcp_sk(sk)->fastopen_rsk))); }); ; ((typeof(*(tcp_sk(sk)->fastopen_rsk)) *)(__UNIQUE_ID_rcu444)); }) != ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fastopen_queue_tune(struct sock *sk, int backlog)
{
struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
int somaxconn = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_446(void) ; if (!((sizeof(sock_net(sk)->core.sysctl_somaxconn) == sizeof(char) || sizeof(sock_net(sk)->core.sysctl_somaxconn) == sizeof(short) || sizeof(sock_net(sk)->core.sysctl_somaxconn) == sizeof(int) || sizeof(sock_net(sk)->core.sysctl_somaxconn) == sizeof(long)) || sizeof(sock_net(sk)->core.sysctl_somaxconn) == sizeof(long long))) __compiletime_assert_446(); } while (0); (*(const volatile typeof( _Generic((sock_net(sk)->core.sysctl_somaxconn), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sock_net(sk)->core.sysctl_somaxconn))) *)&(sock_net(sk)->core.sysctl_somaxconn)); });

queue->fastopenq.max_qlen = __builtin_choose_expr(((!!(sizeof((typeof((unsigned int)(backlog)) *)1 == (typeof((unsigned int)(somaxconn)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((unsigned int)(backlog)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((unsigned int)(somaxconn)) * 0l)) : (int *)8))))), (((unsigned int)(backlog)) < ((unsigned int)(somaxconn)) ? ((unsigned int)(backlog)) : ((unsigned int)(somaxconn))), ({ typeof((unsigned int)(backlog)) __UNIQUE_ID___x447 = ((unsigned int)(backlog)); typeof((unsigned int)(somaxconn)) __UNIQUE_ID___y448 = ((unsigned int)(somaxconn)); ((__UNIQUE_ID___x447) < (__UNIQUE_ID___y448) ? (__UNIQUE_ID___x447) : (__UNIQUE_ID___y448)); }));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_move_syn(struct tcp_sock *tp,
struct request_sock *req)
{
tp->saved_syn = req->saved_syn;
req->saved_syn = ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_saved_syn_free(struct tcp_sock *tp)
{
kfree(tp->saved_syn);
tp->saved_syn = ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 tcp_saved_syn_len(const struct saved_syn *saved_syn)
{
return saved_syn->mac_hdrlen + saved_syn->network_hdrlen +
saved_syn->tcp_hdrlen;
}

struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
const struct sk_buff *orig_skb,
const struct sk_buff *ack_skb);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
{



u16 user_mss = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_449(void) ; if (!((sizeof(tp->rx_opt.user_mss) == sizeof(char) || sizeof(tp->rx_opt.user_mss) == sizeof(short) || sizeof(tp->rx_opt.user_mss) == sizeof(int) || sizeof(tp->rx_opt.user_mss) == sizeof(long)) || sizeof(tp->rx_opt.user_mss) == sizeof(long long))) __compiletime_assert_449(); } while (0); (*(const volatile typeof( _Generic((tp->rx_opt.user_mss), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (tp->rx_opt.user_mss))) *)&(tp->rx_opt.user_mss)); });

return (user_mss && user_mss < mss) ? user_mss : mss;
}

int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
int shiftlen);

void __tcp_sock_set_cork(struct sock *sk, bool on);
void tcp_sock_set_cork(struct sock *sk, bool on);
int tcp_sock_set_keepcnt(struct sock *sk, int val);
int tcp_sock_set_keepidle_locked(struct sock *sk, int val);
int tcp_sock_set_keepidle(struct sock *sk, int val);
int tcp_sock_set_keepintvl(struct sock *sk, int val);
void __tcp_sock_set_nodelay(struct sock *sk, bool on);
void tcp_sock_set_nodelay(struct sock *sk);
void tcp_sock_set_quickack(struct sock *sk, int val);
int tcp_sock_set_syncnt(struct sock *sk, int val);
void tcp_sock_set_user_timeout(struct sock *sk, u32 val);
# 93 "./include/linux/ipv6.h" 2
# 1 "./include/linux/udp.h" 1
# 19 "./include/linux/udp.h"
# 1 "./include/uapi/linux/udp.h" 1
# 23 "./include/uapi/linux/udp.h"
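/*
 * UAPI UDP header and struct udp_sock (encapsulation and GSO/GRO hooks,
 * plus the secondary reader_queue used on the recvmsg() path).
 */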
struct udphdr {
__be16 source;
__be16 dest;
__be16 len;
__sum16 check;
};
# 20 "./include/linux/udp.h" 2

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct udphdr *udp_hdr(const struct sk_buff *skb)
{
return (struct udphdr *)skb_transport_header(skb);
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
{
return (num + net_hash_mix(net)) & mask;
}

struct udp_sock {

struct inet_sock inet;



int pending;
unsigned int corkflag;
__u8 encap_type;
unsigned char no_check6_tx:1,
no_check6_rx:1,
encap_enabled:1,




gro_enabled:1,
accept_udp_l4:1,
accept_udp_fraglist:1;




__u16 len;
__u16 gso_size;



__u16 pcslen;
__u16 pcrlen;




__u8 pcflag;
__u8 unused[3];



int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
void (*encap_destroy)(struct sock *sk);


struct sk_buff * (*gro_receive)(struct sock *sk,
struct list_head *head,
struct sk_buff *skb);
int (*gro_complete)(struct sock *sk,
struct sk_buff *skb,
int nhoff);


struct sk_buff_head reader_queue __attribute__((__aligned__((1 << 6))));


int forward_deficit;
};



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct udp_sock *udp_sk(const struct sock *sk)
{
return (struct udp_sock *)sk;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void udp_set_no_check6_tx(struct sock *sk, bool val)
{
udp_sk(sk)->no_check6_tx = val;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void udp_set_no_check6_rx(struct sock *sk, bool val)
{
udp_sk(sk)->no_check6_rx = val;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool udp_get_no_check6_tx(struct sock *sk)
{
return udp_sk(sk)->no_check6_tx;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool udp_get_no_check6_rx(struct sock *sk)
{
return udp_sk(sk)->no_check6_rx;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
int gso_size;

if (((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_type & SKB_GSO_UDP_L4) {
gso_size = ((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_size;
put_cmsg(msg, 17, 104, sizeof(gso_size), &gso_size);
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
{
if (!skb_is_gso(skb))
return false;

if (((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4)
return true;

if (((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist)
return true;

return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void udp_allow_gso(struct sock *sk)
{
udp_sk(sk)->accept_udp_l4 = 1;
udp_sk(sk)->accept_udp_fraglist = 1;
}
# 94 "./include/linux/ipv6.h" 2



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct ipv6hdr *ipv6_hdr(const struct sk_buff *skb)
{
return (struct ipv6hdr *)skb_network_header(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct ipv6hdr *inner_ipv6_hdr(const struct sk_buff *skb)
{
return (struct ipv6hdr *)skb_inner_network_header(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
{
return (struct ipv6hdr *)skb_transport_header(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int ipv6_transport_len(const struct sk_buff *skb)
{
return (__builtin_constant_p((__u16)(( __u16)(__be16)(ipv6_hdr(skb)->payload_len))) ? ((__u16)( (((__u16)(( __u16)(__be16)(ipv6_hdr(skb)->payload_len)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(ipv6_hdr(skb)->payload_len)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(ipv6_hdr(skb)->payload_len))) + sizeof(struct ipv6hdr) -
skb_network_header_len(skb);
}
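/*
 * [Editor's note -- added annotation.] The __builtin_constant_p()/__fswab16()
 * conditional above is the expansion of ntohs(ipv6_hdr(skb)->payload_len).
 * The result is ntohs(payload_len) + sizeof(struct ipv6hdr) minus the full
 * network header length, i.e. the transport payload with any IPv6 extension
 * headers subtracted out.
 */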






struct inet6_skb_parm {
int iif;
__be16 ra;
__u16 dst0;
__u16 srcrt;
__u16 dst1;
__u16 lastopt;
__u16 nhoff;
__u16 flags;



__u16 frag_max_size;
__u16 srhoff;
# 147 "./include/linux/ipv6.h"
};







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_l3mdev_skb(__u16 flags)
{
return false;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int inet6_iif(const struct sk_buff *skb)
{
bool l3_slave = ipv6_l3mdev_skb(((struct inet6_skb_parm*)((skb)->cb))->flags);

return l3_slave ? skb->skb_iif : ((struct inet6_skb_parm*)((skb)->cb))->iif;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool inet6_is_jumbogram(const struct sk_buff *skb)
{
return !!(((struct inet6_skb_parm*)((skb)->cb))->flags & 128);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int inet6_sdif(const struct sk_buff *skb)
{




return 0;
}

struct tcp6_request_sock {
struct tcp_request_sock tcp6rsk_tcp;
};

struct ipv6_mc_socklist;
struct ipv6_ac_socklist;
struct ipv6_fl_socklist;

struct inet6_cork {
struct ipv6_txoptions *opt;
u8 hop_limit;
u8 tclass;
};
# 208 "./include/linux/ipv6.h"
struct ipv6_pinfo {
struct in6_addr saddr;
struct in6_pktinfo sticky_pktinfo;
const struct in6_addr *daddr_cache;




__be32 flow_label;
__u32 frag_size;
# 227 "./include/linux/ipv6.h"
__u16 __unused_1:7;
__s16 hop_limit:9;
# 237 "./include/linux/ipv6.h"
__u16 mc_loop:1,
__unused_2:6;
__s16 mcast_hops:9;

int ucast_oif;
int mcast_oif;


union {
struct {
__u16 srcrt:1,
osrcrt:1,
rxinfo:1,
rxoinfo:1,
rxhlim:1,
rxohlim:1,
hopopts:1,
ohopopts:1,
dstopts:1,
odstopts:1,
rxflow:1,
rxtclass:1,
rxpmtu:1,
rxorigdstaddr:1,
recvfragsize:1;

} bits;
__u16 all;
} rxopt;


__u16 recverr:1,
sndflow:1,
repflow:1,
pmtudisc:3,
padding:1,
srcprefs:3,



dontfrag:1,
autoflowlabel:1,
autoflowlabel_set:1,
mc_all:1,
recverr_rfc4884:1,
rtalert_isolate:1;
__u8 min_hopcount;
__u8 tclass;
__be32 rcv_flowinfo;

__u32 dst_cookie;

struct ipv6_mc_socklist *ipv6_mc_list;
struct ipv6_ac_socklist *ipv6_ac_list;
struct ipv6_fl_socklist *ipv6_fl_list;

struct ipv6_txoptions *opt;
struct sk_buff *pktoptions;
struct sk_buff *rxpmtu;
struct inet6_cork cork;
};


struct raw6_sock {

struct inet_sock inet;
__u32 checksum;
__u32 offset;
struct icmp6_filter filter;
__u32 ip6mr_table;

struct ipv6_pinfo inet6;
};

struct udp6_sock {
struct udp_sock udp;

struct ipv6_pinfo inet6;
};

struct tcp6_sock {
struct tcp_sock tcp;

struct ipv6_pinfo inet6;
};

extern int inet6_sk_rebuild_header(struct sock *sk);

struct tcp6_timewait_sock {
struct tcp_timewait_sock tcp6tw_tcp;
};


bool ipv6_mod_enabled(void);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct ipv6_pinfo *inet6_sk(const struct sock *__sk)
{
return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct raw6_sock *raw6_sk(const struct sock *sk)
{
return (struct raw6_sock *)sk;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
{
if (sk->__sk_common.skc_family == 10)
return &sk->__sk_common.skc_v6_rcv_saddr;
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int inet_v6_ipv6only(const struct sock *sk)
{

return ((sk->__sk_common.skc_ipv6only));
}
# 13 "./include/net/ipv6.h" 2



# 1 "./include/linux/jump_label_ratelimit.h" 1
# 64 "./include/linux/jump_label_ratelimit.h"
struct static_key_deferred {
struct static_key key;
};
struct static_key_true_deferred {
struct static_key_true key;
};
struct static_key_false_deferred {
struct static_key_false key;
};







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
({ int __ret_warn_on = !!(!static_key_initialized); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("%s(): static key '%pS' used before call to jump_label_init()", __func__, (key)); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/jump_label_ratelimit.h"), "i" (82), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
static_key_slow_dec(&key->key);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void static_key_deferred_flush(void *key)
{
({ int __ret_warn_on = !!(!static_key_initialized); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("%s(): static key '%pS' used before call to jump_label_init()", __func__, (key)); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/jump_label_ratelimit.h"), "i" (87), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
({ int __ret_warn_on = !!(!static_key_initialized); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("%s(): static key '%pS' used before call to jump_label_init()", __func__, (key)); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/jump_label_ratelimit.h"), "i" (93), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
}
# 17 "./include/net/ipv6.h" 2
# 1 "./include/net/if_inet6.h" 1
# 29 "./include/net/if_inet6.h"
enum {
INET6_IFADDR_STATE_PREDAD,
INET6_IFADDR_STATE_DAD,
INET6_IFADDR_STATE_POSTDAD,
INET6_IFADDR_STATE_ERRDAD,
INET6_IFADDR_STATE_DEAD,
};

struct inet6_ifaddr {
struct in6_addr addr;
__u32 prefix_len;
__u32 rt_priority;


__u32 valid_lft;
__u32 prefered_lft;
refcount_t refcnt;
spinlock_t lock;

int state;

__u32 flags;
__u8 dad_probes;
__u8 stable_privacy_retry;

__u16 scope;
__u64 dad_nonce;

unsigned long cstamp;
unsigned long tstamp;

struct delayed_work dad_work;

struct inet6_dev *idev;
struct fib6_info *rt;

struct hlist_node addr_lst;
struct list_head if_list;

struct list_head tmp_list;
struct inet6_ifaddr *ifpub;
int regen_count;

bool tokenized;

u8 ifa_proto;

struct callback_head rcu;
struct in6_addr peer_addr;
};

struct ip6_sf_socklist {
unsigned int sl_max;
unsigned int sl_count;
struct callback_head rcu;
struct in6_addr sl_addr[];
};



struct ipv6_mc_socklist {
struct in6_addr addr;
int ifindex;
unsigned int sfmode;
struct ipv6_mc_socklist *next;
struct ip6_sf_socklist *sflist;
struct callback_head rcu;
};

struct ip6_sf_list {
struct ip6_sf_list *sf_next;
struct in6_addr sf_addr;
unsigned long sf_count[2];
unsigned char sf_gsresp;
unsigned char sf_oldin;
unsigned char sf_crcount;
struct callback_head rcu;
};







struct ifmcaddr6 {
struct in6_addr mca_addr;
struct inet6_dev *idev;
struct ifmcaddr6 *next;
struct ip6_sf_list *mca_sources;
struct ip6_sf_list *mca_tomb;
unsigned int mca_sfmode;
unsigned char mca_crcount;
unsigned long mca_sfcount[2];
struct delayed_work mca_work;
unsigned int mca_flags;
int mca_users;
refcount_t mca_refcnt;
unsigned long mca_cstamp;
unsigned long mca_tstamp;
struct callback_head rcu;
};



struct ipv6_ac_socklist {
struct in6_addr acl_addr;
int acl_ifindex;
struct ipv6_ac_socklist *acl_next;
};

struct ifacaddr6 {
struct in6_addr aca_addr;
struct fib6_info *aca_rt;
struct ifacaddr6 *aca_next;
struct hlist_node aca_addr_lst;
int aca_users;
refcount_t aca_refcnt;
unsigned long aca_cstamp;
unsigned long aca_tstamp;
struct callback_head rcu;
};





struct ipv6_devstat {
struct proc_dir_entry *proc_dir_entry;
__typeof__(struct ipstats_mib) *ipv6;
__typeof__(struct icmpv6_mib_device) *icmpv6dev;
__typeof__(struct icmpv6msg_mib_device) *icmpv6msgdev;
};

struct inet6_dev {
struct net_device *dev;
netdevice_tracker dev_tracker;

struct list_head addr_list;

struct ifmcaddr6 *mc_list;
struct ifmcaddr6 *mc_tomb;

unsigned char mc_qrv;
unsigned char mc_gq_running;
unsigned char mc_ifc_count;
unsigned char mc_dad_count;

unsigned long mc_v1_seen;
unsigned long mc_qi;
unsigned long mc_qri;
unsigned long mc_maxdelay;

struct delayed_work mc_gq_work;
struct delayed_work mc_ifc_work;
struct delayed_work mc_dad_work;
struct delayed_work mc_query_work;
struct delayed_work mc_report_work;

struct sk_buff_head mc_query_queue;
struct sk_buff_head mc_report_queue;

spinlock_t mc_query_lock;
spinlock_t mc_report_lock;
struct mutex mc_lock;

struct ifacaddr6 *ac_list;
rwlock_t lock;
refcount_t refcnt;
__u32 if_flags;
int dead;

u32 desync_factor;
struct list_head tempaddr_list;

struct in6_addr token;

struct neigh_parms *nd_parms;
struct ipv6_devconf cnf;
struct ipv6_devstat stats;

struct timer_list rs_timer;
__s32 rs_interval;
__u8 rs_probes;

unsigned long tstamp;
struct callback_head rcu;

unsigned int ra_mtu;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ipv6_eth_mc_map(const struct in6_addr *addr, char *buf)
{






buf[0] = 0x33;
buf[1] = 0x33;

memcpy(buf + 2, &addr->in6_u.u6_addr32[3], sizeof(__u32));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ipv6_arcnet_mc_map(const struct in6_addr *addr, char *buf)
{
buf[0] = 0x00;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ipv6_ib_mc_map(const struct in6_addr *addr,
const unsigned char *broadcast, char *buf)
{
unsigned char scope = broadcast[5] & 0xF;

buf[0] = 0;
buf[1] = 0xff;
buf[2] = 0xff;
buf[3] = 0xff;
buf[4] = 0xff;
buf[5] = 0x10 | scope;
buf[6] = 0x60;
buf[7] = 0x1b;
buf[8] = broadcast[8];
buf[9] = broadcast[9];
memcpy(buf + 10, addr->in6_u.u6_addr8 + 6, 10);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ipv6_ipgre_mc_map(const struct in6_addr *addr,
const unsigned char *broadcast, char *buf)
{
if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0) {
memcpy(buf, broadcast, 4);
} else {

if ((addr->in6_u.u6_addr32[0] | addr->in6_u.u6_addr32[1] |
(addr->in6_u.u6_addr32[2] ^ (( __be32)(__builtin_constant_p((__u32)((0x0000ffff))) ? ((__u32)( (((__u32)((0x0000ffff)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x0000ffff)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x0000ffff)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x0000ffff)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x0000ffff)))))) != 0)
return -22;
memcpy(buf, &addr->in6_u.u6_addr32[3], 4);
}
return 0;
}
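/*
 * [Editor's note -- added annotation.] The large swab expression above is
 * the expansion of htonl(0x0000ffff): the function rejects (returning the
 * literal -22, i.e. -EINVAL) any address that is not IPv4-mapped
 * (::ffff:a.b.c.d), since only the embedded IPv4 word can be copied into
 * the 4-byte GRE key.
 */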
# 18 "./include/net/ipv6.h" 2






struct ip_tunnel_info;
# 147 "./include/net/ipv6.h"
struct frag_hdr {
__u8 nexthdr;
__u8 reserved;
__be16 frag_off;
__be32 identification;
};




struct ip6_fraglist_iter {
struct ipv6hdr *tmp_hdr;
struct sk_buff *frag;
int offset;
unsigned int hlen;
__be32 frag_id;
u8 nexthdr;
};

int ip6_fraglist_init(struct sk_buff *skb, unsigned int hlen, u8 *prevhdr,
u8 nexthdr, __be32 frag_id,
struct ip6_fraglist_iter *iter);
void ip6_fraglist_prepare(struct sk_buff *skb, struct ip6_fraglist_iter *iter);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *ip6_fraglist_next(struct ip6_fraglist_iter *iter)
{
struct sk_buff *skb = iter->frag;

iter->frag = skb->next;
skb_mark_not_on_list(skb);

return skb;
}

struct ip6_frag_state {
u8 *prevhdr;
unsigned int hlen;
unsigned int mtu;
unsigned int left;
int offset;
int ptr;
int hroom;
int troom;
__be32 frag_id;
u8 nexthdr;
};

void ip6_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int mtu,
unsigned short needed_tailroom, int hdr_room, u8 *prevhdr,
u8 nexthdr, __be32 frag_id, struct ip6_frag_state *state);
struct sk_buff *ip6_frag_next(struct sk_buff *skb,
struct ip6_frag_state *state);







extern int sysctl_mld_max_msf;
extern int sysctl_mld_qrv;
# 275 "./include/net/ipv6.h"
struct ip6_ra_chain {
struct ip6_ra_chain *next;
struct sock *sk;
int sel;
void (*destructor)(struct sock *);
};

extern struct ip6_ra_chain *ip6_ra_chain;
extern rwlock_t ip6_ra_lock;






struct ipv6_txoptions {
refcount_t refcnt;

int tot_len;



__u16 opt_flen;
__u16 opt_nflen;

struct ipv6_opt_hdr *hopopt;
struct ipv6_opt_hdr *dst0opt;
struct ipv6_rt_hdr *srcrt;
struct ipv6_opt_hdr *dst1opt;
struct callback_head rcu;

};


enum flowlabel_reflect {
FLOWLABEL_REFLECT_ESTABLISHED = 1,
FLOWLABEL_REFLECT_TCP_RESET = 2,
FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES = 4,
};

struct ip6_flowlabel {
struct ip6_flowlabel *next;
__be32 label;
atomic_t users;
struct in6_addr dst;
struct ipv6_txoptions *opt;
unsigned long linger;
struct callback_head rcu;
u8 share;
union {
struct pid *pid;
kuid_t uid;
} owner;
unsigned long lastuse;
unsigned long expires;
struct net *fl_net;
};
# 340 "./include/net/ipv6.h"
struct ipv6_fl_socklist {
struct ipv6_fl_socklist *next;
struct ip6_flowlabel *fl;
struct callback_head rcu;
};

struct ipcm6_cookie {
struct sockcm_cookie sockc;
__s16 hlimit;
__s16 tclass;
__u16 gso_size;
__s8 dontfrag;
struct ipv6_txoptions *opt;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ipcm6_init(struct ipcm6_cookie *ipc6)
{
*ipc6 = (struct ipcm6_cookie) {
.hlimit = -1,
.tclass = -1,
.dontfrag = -1,
};
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ipcm6_init_sk(struct ipcm6_cookie *ipc6,
const struct ipv6_pinfo *np)
{
*ipc6 = (struct ipcm6_cookie) {
.hlimit = -1,
.tclass = np->tclass,
.dontfrag = np->dontfrag,
};
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
{
struct ipv6_txoptions *opt;

rcu_read_lock();
opt = ({ typeof(*(np->opt)) *__UNIQUE_ID_rcu450 = (typeof(*(np->opt)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_451(void) ; if (!((sizeof((np->opt)) == sizeof(char) || sizeof((np->opt)) == sizeof(short) || sizeof((np->opt)) == sizeof(int) || sizeof((np->opt)) == sizeof(long)) || sizeof((np->opt)) == sizeof(long long))) __compiletime_assert_451(); } while (0); (*(const volatile typeof( _Generic(((np->opt)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((np->opt)))) *)&((np->opt))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(np->opt)) *)(__UNIQUE_ID_rcu450)); });
if (opt) {
if (!refcount_inc_not_zero(&opt->refcnt))
opt = ((void *)0);
else
opt = (opt);
}
rcu_read_unlock();
return opt;
}
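/*
 * [Editor's note -- added annotation.] txopt_get() is the classic
 * RCU + refcount lookup: the ({ ... }) block is the expansion of
 * rcu_dereference(np->opt), and refcount_inc_not_zero() refuses to revive
 * an object whose last reference is already gone, so a reader can never
 * race a concurrent txopt_put()/kvfree_call_rcu(). A minimal sketch of the
 * same pattern (struct txopt_demo is hypothetical):
 */
struct txopt_demo { refcount_t refcnt; };

static __attribute__((__unused__)) struct txopt_demo *txopt_demo_tryget(struct txopt_demo *obj)
{
/* caller holds rcu_read_lock() */
if (obj && !refcount_inc_not_zero(&obj->refcnt))
obj = ((void *)0); /* object is being freed; pretend we saw NULL */
return obj;
}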

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void txopt_put(struct ipv6_txoptions *opt)
{
if (opt && refcount_dec_and_test(&opt->refcnt))
do { typeof (opt) ___p = (opt); if (___p) { do { __attribute__((__noreturn__)) extern void __compiletime_assert_452(void) ; if (!(!(!((__builtin_offsetof(typeof(*(opt)), rcu)) < 4096)))) __compiletime_assert_452(); } while (0); kvfree_call_rcu(&((___p)->rcu), (rcu_callback_t)(unsigned long) (__builtin_offsetof(typeof(*(opt)), rcu))); } } while (0);
}


struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label);

extern struct static_key_false_deferred ipv6_flowlabel_exclusive;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk,
__be32 label)
{
if (__builtin_expect(!!(({ if (!__builtin_types_compatible_p(typeof(*&(&ipv6_flowlabel_exclusive.key)->key), struct static_key) && !__builtin_types_compatible_p(typeof(*&(&ipv6_flowlabel_exclusive.key)->key), struct static_key_true) && !__builtin_types_compatible_p(typeof(*&(&ipv6_flowlabel_exclusive.key)->key), struct static_key_false)) ____wrong_branch_error(); static_key_count((struct static_key *)&(&ipv6_flowlabel_exclusive.key)->key) > 0; })), 0) &&
({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_453(void) ; if (!((sizeof(sock_net(sk)->ipv6.flowlabel_has_excl) == sizeof(char) || sizeof(sock_net(sk)->ipv6.flowlabel_has_excl) == sizeof(short) || sizeof(sock_net(sk)->ipv6.flowlabel_has_excl) == sizeof(int) || sizeof(sock_net(sk)->ipv6.flowlabel_has_excl) == sizeof(long)) || sizeof(sock_net(sk)->ipv6.flowlabel_has_excl) == sizeof(long long))) __compiletime_assert_453(); } while (0); (*(const volatile typeof( _Generic((sock_net(sk)->ipv6.flowlabel_has_excl), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sock_net(sk)->ipv6.flowlabel_has_excl))) *)&(sock_net(sk)->ipv6.flowlabel_has_excl)); }))
return __fl6_sock_lookup(sk, label) ? : ERR_PTR(-2);

return ((void *)0);
}


struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
struct ip6_flowlabel *fl,
struct ipv6_txoptions *fopt);
void fl6_free_socklist(struct sock *sk);
int ipv6_flowlabel_opt(struct sock *sk, sockptr_t optval, int optlen);
int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
int flags);
int ip6_flowlabel_init(void);
void ip6_flowlabel_cleanup(void);
bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fl6_sock_release(struct ip6_flowlabel *fl)
{
if (fl)
atomic_dec(&fl->users);
}

void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);

void icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
struct icmp6hdr *thdr, int len);

int ip6_ra_control(struct sock *sk, int sel);

int ipv6_parse_hopopts(struct sk_buff *skb);

struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
struct ipv6_txoptions *opt);
struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
struct ipv6_txoptions *opt,
int newtype,
struct ipv6_opt_hdr *newopt);
struct ipv6_txoptions *__ipv6_fixup_options(struct ipv6_txoptions *opt_space,
struct ipv6_txoptions *opt);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct ipv6_txoptions *
ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt)
{
if (!opt)
return ((void *)0);
return __ipv6_fixup_options(opt_space, opt);
}

bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
const struct inet6_skb_parm *opt);
struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
struct ipv6_txoptions *opt);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_accept_ra(struct inet6_dev *idev)
{



return idev->cnf.forwarding ? idev->cnf.accept_ra == 2 :
idev->cnf.accept_ra;
}





int __ipv6_addr_type(const struct in6_addr *addr);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ipv6_addr_type(const struct in6_addr *addr)
{
return __ipv6_addr_type(addr) & 0xffff;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ipv6_addr_scope(const struct in6_addr *addr)
{
return __ipv6_addr_type(addr) & 0x00f0U;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __ipv6_addr_src_scope(int type)
{
return (type == 0x0000U) ? -1 : (type >> 16);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ipv6_addr_src_scope(const struct in6_addr *addr)
{
return __ipv6_addr_src_scope(__ipv6_addr_type(addr));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool __ipv6_addr_needs_scope_id(int type)
{
return type & 0x0020U ||
(type & 0x0002U &&
(type & (0x0010U|0x0020U)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u32 ipv6_iface_scope_id(const struct in6_addr *addr, int iface)
{
return __ipv6_addr_needs_scope_id(__ipv6_addr_type(addr)) ? iface : 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2)
{
return memcmp(a1, a2, sizeof(struct in6_addr));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
const struct in6_addr *a2)
{
# 522 "./include/net/ipv6.h"
return !!(((a1->in6_u.u6_addr32[0] ^ a2->in6_u.u6_addr32[0]) & m->in6_u.u6_addr32[0]) |
((a1->in6_u.u6_addr32[1] ^ a2->in6_u.u6_addr32[1]) & m->in6_u.u6_addr32[1]) |
((a1->in6_u.u6_addr32[2] ^ a2->in6_u.u6_addr32[2]) & m->in6_u.u6_addr32[2]) |
((a1->in6_u.u6_addr32[3] ^ a2->in6_u.u6_addr32[3]) & m->in6_u.u6_addr32[3]));

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ipv6_addr_prefix(struct in6_addr *pfx,
const struct in6_addr *addr,
int plen)
{

int o = plen >> 3,
b = plen & 0x7;

memset(pfx->in6_u.u6_addr8, 0, sizeof(pfx->in6_u.u6_addr8));
memcpy(pfx->in6_u.u6_addr8, addr, o);
if (b != 0)
pfx->in6_u.u6_addr8[o] = addr->in6_u.u6_addr8[o] & (0xff00 >> b);
}
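/*
 * [Editor's note -- added annotation.] (0xff00 >> b), truncated to a byte,
 * keeps the top b bits of the boundary byte: for plen = 20, o = 2 and
 * b = 4, so two whole bytes are copied and the third is masked with 0xf0.
 * A minimal sketch of just the mask computation (the name is hypothetical):
 */
static __attribute__((__unused__)) unsigned char prefix_tail_mask_demo(int plen)
{
int b = plen & 0x7; /* bits past the last whole byte */

/* 0xff00 >> 4 == 0x0ff0; as a byte that is 0xf0 */
return b ? (unsigned char)(0xff00 >> b) : 0;
}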

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ipv6_addr_prefix_copy(struct in6_addr *addr,
const struct in6_addr *pfx,
int plen)
{

int o = plen >> 3,
b = plen & 0x7;

memcpy(addr->in6_u.u6_addr8, pfx, o);
if (b != 0) {
addr->in6_u.u6_addr8[o] &= ~(0xff00 >> b);
addr->in6_u.u6_addr8[o] |= (pfx->in6_u.u6_addr8[o] & (0xff00 >> b));
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __ipv6_addr_set_half(__be32 *addr,
__be32 wh, __be32 wl)
{
# 574 "./include/net/ipv6.h"
addr[0] = wh;
addr[1] = wl;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ipv6_addr_set(struct in6_addr *addr,
__be32 w1, __be32 w2,
__be32 w3, __be32 w4)
{
__ipv6_addr_set_half(&addr->in6_u.u6_addr32[0], w1, w2);
__ipv6_addr_set_half(&addr->in6_u.u6_addr32[2], w3, w4);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_addr_equal(const struct in6_addr *a1,
const struct in6_addr *a2)
{






return ((a1->in6_u.u6_addr32[0] ^ a2->in6_u.u6_addr32[0]) |
(a1->in6_u.u6_addr32[1] ^ a2->in6_u.u6_addr32[1]) |
(a1->in6_u.u6_addr32[2] ^ a2->in6_u.u6_addr32[2]) |
(a1->in6_u.u6_addr32[3] ^ a2->in6_u.u6_addr32[3])) == 0;

}
# 627 "./include/net/ipv6.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_prefix_equal(const struct in6_addr *addr1,
const struct in6_addr *addr2,
unsigned int prefixlen)
{
const __be32 *a1 = addr1->in6_u.u6_addr32;
const __be32 *a2 = addr2->in6_u.u6_addr32;
unsigned int pdw, pbi;


pdw = prefixlen >> 5;
if (pdw && memcmp(a1, a2, pdw << 2))
return false;


pbi = prefixlen & 0x1f;
if (pbi && ((a1[pdw] ^ a2[pdw]) & (( __be32)(__builtin_constant_p((__u32)(((0xffffffff) << (32 - pbi)))) ? ((__u32)( (((__u32)(((0xffffffff) << (32 - pbi))) & (__u32)0x000000ffUL) << 24) | (((__u32)(((0xffffffff) << (32 - pbi))) & (__u32)0x0000ff00UL) << 8) | (((__u32)(((0xffffffff) << (32 - pbi))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(((0xffffffff) << (32 - pbi))) & (__u32)0xff000000UL) >> 24))) : __fswab32(((0xffffffff) << (32 - pbi)))))))
return false;

return true;
}
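/*
 * [Editor's note -- added annotation.] The comparison runs a 32-bit word at
 * a time: prefixlen >> 5 whole words via memcmp, then the remaining
 * prefixlen & 0x1f bits of the next word under the big-endian mask
 * htonl(0xffffffff << (32 - pbi)) (the swab expression above). For
 * prefixlen = 48 that is one full word plus the top 16 bits of the second.
 */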


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_addr_any(const struct in6_addr *a)
{





return (a->in6_u.u6_addr32[0] | a->in6_u.u6_addr32[1] |
a->in6_u.u6_addr32[2] | a->in6_u.u6_addr32[3]) == 0;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 ipv6_addr_hash(const struct in6_addr *a)
{






return ( u32)(a->in6_u.u6_addr32[0] ^ a->in6_u.u6_addr32[1] ^
a->in6_u.u6_addr32[2] ^ a->in6_u.u6_addr32[3]);

}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
{
u32 v = ( u32)a->in6_u.u6_addr32[0] ^ ( u32)a->in6_u.u6_addr32[1];

return jhash_3words(v,
( u32)a->in6_u.u6_addr32[2],
( u32)a->in6_u.u6_addr32[3],
initval);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_addr_loopback(const struct in6_addr *a)
{





return (a->in6_u.u6_addr32[0] | a->in6_u.u6_addr32[1] |
a->in6_u.u6_addr32[2] | (a->in6_u.u6_addr32[3] ^ (( __be32)(__builtin_constant_p((__u32)((1))) ? ((__u32)( (((__u32)((1)) & (__u32)0x000000ffUL) << 24) | (((__u32)((1)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((1)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((1)) & (__u32)0xff000000UL) >> 24))) : __fswab32((1)))))) == 0;

}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_addr_v4mapped(const struct in6_addr *a)
{
return (



( unsigned long)(a->in6_u.u6_addr32[0] | a->in6_u.u6_addr32[1]) |

( unsigned long)(a->in6_u.u6_addr32[2] ^
(( __be32)(__builtin_constant_p((__u32)((0x0000ffff))) ? ((__u32)( (((__u32)((0x0000ffff)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x0000ffff)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x0000ffff)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x0000ffff)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x0000ffff)))))) == 0UL;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_addr_v4mapped_loopback(const struct in6_addr *a)
{
return ipv6_addr_v4mapped(a) && ipv4_is_loopback(a->in6_u.u6_addr32[3]);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 ipv6_portaddr_hash(const struct net *net,
const struct in6_addr *addr6,
unsigned int port)
{
unsigned int hash, mix = net_hash_mix(net);

if (ipv6_addr_any(addr6))
hash = jhash_1word(0, mix);
else if (ipv6_addr_v4mapped(addr6))
hash = jhash_1word(( u32)addr6->in6_u.u6_addr32[3], mix);
else
hash = jhash2(( u32 *)addr6->in6_u.u6_addr32, 4, mix);

return hash ^ port;
}
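/*
 * [Editor's note -- added annotation.] Three cases pick progressively wider
 * jhash inputs -- the unspecified address hashes a constant 0, a v4-mapped
 * address hashes only its embedded IPv4 word, and everything else hashes
 * all four words -- before the port is folded in with a final XOR.
 */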





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_addr_orchid(const struct in6_addr *a)
{
return (a->in6_u.u6_addr32[0] & (( __be32)(__builtin_constant_p((__u32)((0xfffffff0))) ? ((__u32)( (((__u32)((0xfffffff0)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xfffffff0)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xfffffff0)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xfffffff0)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xfffffff0))))) == (( __be32)(__builtin_constant_p((__u32)((0x20010010))) ? ((__u32)( (((__u32)((0x20010010)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x20010010)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x20010010)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x20010010)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x20010010))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_addr_is_multicast(const struct in6_addr *addr)
{
return (addr->in6_u.u6_addr32[0] & (( __be32)(__builtin_constant_p((__u32)((0xFF000000))) ? ((__u32)( (((__u32)((0xFF000000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xFF000000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xFF000000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xFF000000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xFF000000))))) == (( __be32)(__builtin_constant_p((__u32)((0xFF000000))) ? ((__u32)( (((__u32)((0xFF000000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xFF000000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xFF000000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xFF000000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xFF000000))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ipv6_addr_set_v4mapped(const __be32 addr,
struct in6_addr *v4mapped)
{
ipv6_addr_set(v4mapped,
0, 0,
(( __be32)(__builtin_constant_p((__u32)((0x0000FFFF))) ? ((__u32)( (((__u32)((0x0000FFFF)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x0000FFFF)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x0000FFFF)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x0000FFFF)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x0000FFFF)))),
addr);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __ipv6_addr_diff32(const void *token1, const void *token2, int addrlen)
{
const __be32 *a1 = token1, *a2 = token2;
int i;

addrlen >>= 2;

for (i = 0; i < addrlen; i++) {
__be32 xb = a1[i] ^ a2[i];
if (xb)
return i * 32 + 31 - __fls((__builtin_constant_p((__u32)(( __u32)(__be32)(xb))) ? ((__u32)( (((__u32)(( __u32)(__be32)(xb)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(xb)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(xb)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(xb)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(xb))));
}
# 790 "./include/net/ipv6.h"
return addrlen << 5;
}
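/*
 * [Editor's note -- added annotation.] Each xb = a1[i] ^ a2[i] isolates the
 * differing bits of one word; the swab expression is ntohl(xb), and
 * 31 - __fls() converts "highest set bit" into "number of leading bits that
 * still match". E.g. words differing only in their lowest bit give
 * __fls() == 0 and a diff position of 31. A host-order sketch of the same
 * per-word step (the name is hypothetical):
 */
static __attribute__((__unused__)) int leading_match_bits_demo(__u32 a, __u32 b)
{
__u32 xb = a ^ b;
int msb = 31;

if (!xb)
return 32; /* words are identical */
while (!(xb & 0x80000000U)) { /* find the highest differing bit */
xb <<= 1;
msb--;
}
return 31 - msb; /* 0 means the MSBs already differ */
}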
# 811 "./include/net/ipv6.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __ipv6_addr_diff(const void *token1, const void *token2, int addrlen)
{




return __ipv6_addr_diff32(token1, token2, addrlen);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_addr *a2)
{
return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
}

__be32 ipv6_select_ident(struct net *net,
const struct in6_addr *daddr,
const struct in6_addr *saddr);
__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);

int ip6_dst_hoplimit(struct dst_entry *dst);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
struct dst_entry *dst)
{
int hlimit;

if (ipv6_addr_is_multicast(&fl6->daddr))
hlimit = np->mcast_hops;
else
hlimit = np->hop_limit;
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
return hlimit;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
const struct ipv6hdr *iph)
{
do { __attribute__((__noreturn__)) extern void __compiletime_assert_454(void) ; if (!(!(__builtin_offsetof(typeof(flow->addrs), v6addrs.dst) != __builtin_offsetof(typeof(flow->addrs), v6addrs.src) + sizeof(flow->addrs.v6addrs.src)))) __compiletime_assert_454(); } while (0);


memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs));
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_can_nonlocal_bind(struct net *net,
struct inet_sock *inet)
{
return net->ipv6.sysctl.ip_nonlocal_bind ||
inet->freebind || inet->transparent;
}
# 879 "./include/net/ipv6.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
__be32 flowlabel, bool autolabel,
struct flowi6 *fl6)
{
u32 hash;




flowlabel &= (( __be32)(__builtin_constant_p((__u32)((0x000FFFFF))) ? ((__u32)( (((__u32)((0x000FFFFF)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x000FFFFF)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x000FFFFF)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x000FFFFF)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x000FFFFF))));

if (flowlabel ||
net->ipv6.sysctl.auto_flowlabels == 0 ||
(!autolabel &&
net->ipv6.sysctl.auto_flowlabels != 3))
return flowlabel;

hash = skb_get_hash_flowi6(skb, fl6);





hash = rol32(hash, 16);

flowlabel = ( __be32)hash & (( __be32)(__builtin_constant_p((__u32)((0x000FFFFF))) ? ((__u32)( (((__u32)((0x000FFFFF)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x000FFFFF)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x000FFFFF)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x000FFFFF)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x000FFFFF))));

if (net->ipv6.sysctl.flowlabel_state_ranges)
flowlabel |= (( __be32)(__builtin_constant_p((__u32)((0x00080000))) ? ((__u32)( (((__u32)((0x00080000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x00080000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x00080000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x00080000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x00080000))));

return flowlabel;
}
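/*
 * [Editor's note -- added annotation.] The repeated swab expression is the
 * 20-bit label mask htonl(0x000FFFFF); 0x00080000 is the "state ranges"
 * bit. When auto-labelling kicks in, the flow hash is rotated so its upper
 * half lands in the label field. A host-order sketch with a hypothetical
 * hash value: rol32(0x12345678, 16) == 0x56781234, masked to 0x81234.
 */
static __attribute__((__unused__)) u32 autolabel_demo(u32 hash)
{
hash = (hash << 16) | (hash >> 16); /* rol32(hash, 16) */
return hash & 0x000FFFFF; /* keep the 20 label bits */
}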

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ip6_default_np_autolabel(struct net *net)
{
switch (net->ipv6.sysctl.auto_flowlabels) {
case 0:
case 2:
default:
return 0;
case 1:
case 3:
return 1;
}
}
# 938 "./include/net/ipv6.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ip6_multipath_hash_policy(const struct net *net)
{
return net->ipv6.sysctl.multipath_hash_policy;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 ip6_multipath_hash_fields(const struct net *net)
{
return net->ipv6.sysctl.multipath_hash_fields;
}
# 960 "./include/net/ipv6.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip6_flow_hdr(struct ipv6hdr *hdr, unsigned int tclass,
__be32 flowlabel)
{
*(__be32 *)hdr = (( __be32)(__builtin_constant_p((__u32)((0x60000000 | (tclass << 20)))) ? ((__u32)( (((__u32)((0x60000000 | (tclass << 20))) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x60000000 | (tclass << 20))) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x60000000 | (tclass << 20))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x60000000 | (tclass << 20))) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x60000000 | (tclass << 20))))) | flowlabel;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be32 ip6_flowinfo(const struct ipv6hdr *hdr)
{
return *(__be32 *)hdr & (( __be32)(__builtin_constant_p((__u32)((0x0FFFFFFF))) ? ((__u32)( (((__u32)((0x0FFFFFFF)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x0FFFFFFF)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x0FFFFFFF)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x0FFFFFFF)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x0FFFFFFF))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be32 ip6_flowlabel(const struct ipv6hdr *hdr)
{
return *(__be32 *)hdr & (( __be32)(__builtin_constant_p((__u32)((0x000FFFFF))) ? ((__u32)( (((__u32)((0x000FFFFF)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x000FFFFF)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x000FFFFF)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x000FFFFF)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x000FFFFF))));
}
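/*
 * [Editor's note -- added annotation.] The enormous expression in
 * ip6_tclass() below is just ntohl(flowinfo & (IPV6_FLOWINFO_MASK &
 * ~IPV6_FLOWLABEL_MASK)) >> 20 with every byte-swap macro expanded twice by
 * the preprocessor: mask off the 20 label bits, keep the 8 traffic-class
 * bits, and shift them down.
 */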

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u8 ip6_tclass(__be32 flowinfo)
{
return (__builtin_constant_p((__u32)(( __u32)(__be32)(flowinfo & ((( __be32)(__builtin_constant_p((__u32)((0x0FFFFFFF))) ? ((__u32)( (((__u32)((0x0FFFFFFF)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x0FFFFFFF)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x0FFFFFFF)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x0FFFFFFF)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x0FFFFFFF)))) & ~(( __be32)(__builtin_constant_p((__u32)((0x000FFFFF))) ? ((__u32)( (((__u32)((0x000FFFFF)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x000FFFFF)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x000FFFFF)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x000FFFFF)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x000FFFFF)))))))) ? ((__u32)( (((__u32)(( __u32)(__be32)(flowinfo & ((( __be32)(__builtin_constant_p((__u32)((0x0FFFFFFF))) ? ((__u32)( (((__u32)((0x0FFFFFFF)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x0FFFFFFF)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x0FFFFFFF)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x0FFFFFFF)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x0FFFFFFF)))) & ~(( __be32)(__builtin_constant_p((__u32)((0x000FFFFF))) ? ((__u32)( (((__u32)((0x000FFFFF)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x000FFFFF)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x000FFFFF)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x000FFFFF)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x000FFFFF))))))) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(flowinfo & ((( __be32)(__builtin_constant_p((__u32)((0x0FFFFFFF))) ? ((__u32)( (((__u32)((0x0FFFFFFF)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x0FFFFFFF)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x0FFFFFFF)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x0FFFFFFF)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x0FFFFFFF)))) & ~(( __be32)(__builtin_constant_p((__u32)((0x000FFFFF))) ? ((__u32)( (((__u32)((0x000FFFFF)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x000FFFFF)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x000FFFFF)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x000FFFFF)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x000FFFFF))))))) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(flowinfo & ((( __be32)(__builtin_constant_p((__u32)((0x0FFFFFFF))) ? ((__u32)( (((__u32)((0x0FFFFFFF)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x0FFFFFFF)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x0FFFFFFF)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x0FFFFFFF)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x0FFFFFFF)))) & ~(( __be32)(__builtin_constant_p((__u32)((0x000FFFFF))) ? ((__u32)( (((__u32)((0x000FFFFF)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x000FFFFF)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x000FFFFF)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x000FFFFF)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x000FFFFF))))))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(flowinfo & ((( __be32)(__builtin_constant_p((__u32)((0x0FFFFFFF))) ? ((__u32)( (((__u32)((0x0FFFFFFF)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x0FFFFFFF)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x0FFFFFFF)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x0FFFFFFF)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x0FFFFFFF)))) & ~(( __be32)(__builtin_constant_p((__u32)((0x000FFFFF))) ? 
((__u32)( (((__u32)((0x000FFFFF)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x000FFFFF)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x000FFFFF)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x000FFFFF)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x000FFFFF))))))) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(flowinfo & ((( __be32)(__builtin_constant_p((__u32)((0x0FFFFFFF))) ? ((__u32)( (((__u32)((0x0FFFFFFF)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x0FFFFFFF)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x0FFFFFFF)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x0FFFFFFF)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x0FFFFFFF)))) & ~(( __be32)(__builtin_constant_p((__u32)((0x000FFFFF))) ? ((__u32)( (((__u32)((0x000FFFFF)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x000FFFFF)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x000FFFFF)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x000FFFFF)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x000FFFFF)))))))) >> 20;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) dscp_t ip6_dscp(__be32 flowinfo)
{
return inet_dsfield_to_dscp(ip6_tclass(flowinfo));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel)
{
return (( __be32)(__builtin_constant_p((__u32)((tclass << 20))) ? ((__u32)( (((__u32)((tclass << 20)) & (__u32)0x000000ffUL) << 24) | (((__u32)((tclass << 20)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((tclass << 20)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((tclass << 20)) & (__u32)0xff000000UL) >> 24))) : __fswab32((tclass << 20)))) | flowlabel;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be32 flowi6_get_flowlabel(const struct flowi6 *fl6)
{
return fl6->flowlabel & (( __be32)(__builtin_constant_p((__u32)((0x000FFFFF))) ? ((__u32)( (((__u32)((0x000FFFFF)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x000FFFFF)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x000FFFFF)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x000FFFFF)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x000FFFFF))));
}
# 1004 "./include/net/ipv6.h"
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
struct net_device *orig_dev);

int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);




int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
__u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority);

int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);

int ip6_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
struct rt6_info *rt, unsigned int flags);

int ip6_push_pending_frames(struct sock *sk);

void ip6_flush_pending_frames(struct sock *sk);

int ip6_send_skb(struct sk_buff *skb);

struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue,
struct inet_cork_full *cork,
struct inet6_cork *v6_cork);
struct sk_buff *ip6_make_skb(struct sock *sk,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
struct ipcm6_cookie *ipc6,
struct rt6_info *rt, unsigned int flags,
struct inet_cork_full *cork);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *ip6_finish_skb(struct sock *sk)
{
return __ip6_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork,
&inet6_sk(sk)->cork);
}

int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
struct flowi6 *fl6);
struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst);
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst,
bool connected);
struct dst_entry *ip6_dst_lookup_tunnel(struct sk_buff *skb,
struct net_device *dev,
struct net *net, struct socket *sock,
struct in6_addr *saddr,
const struct ip_tunnel_info *info,
u8 protocol, bool use_cache);
struct dst_entry *ip6_blackhole_route(struct net *net,
struct dst_entry *orig_dst);





int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip6_forward(struct sk_buff *skb);
int ip6_input(struct sk_buff *skb);
int ip6_mc_input(struct sk_buff *skb);
void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
bool have_final);

int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);





void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
u8 *proto, struct in6_addr **daddr_p,
struct in6_addr *saddr);
void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
u8 *proto);

int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp,
__be16 *frag_offp);

bool ipv6_ext_hdr(u8 nexthdr);

enum {
IP6_FH_F_FRAG = (1 << 0),
IP6_FH_F_AUTH = (1 << 1),
IP6_FH_F_SKIP_RH = (1 << 2),
};


int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target,
unsigned short *fragoff, int *fragflg);

int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type);

struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
const struct ipv6_txoptions *opt,
struct in6_addr *orig);




extern struct static_key_false ip6_min_hopcount;

int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
int ipv6_getsockopt(struct sock *sk, int level, int optname,
char *optval, int *optlen);

int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr,
int addr_len);
int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
int addr_len);
int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr);
void ip6_datagram_release_cb(struct sock *sk);

int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
int *addr_len);
int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
int *addr_len);
void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
u32 info, u8 *payload);
void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);

int inet6_release(struct socket *sock);
int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
int peer);
int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int inet6_compat_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg);

int inet6_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk);
int inet6_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
int inet6_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags);




extern const struct proto_ops inet6_stream_ops;
extern const struct proto_ops inet6_dgram_ops;
extern const struct proto_ops inet6_sockraw_ops;

struct group_source_req;
struct group_filter;

int ip6_mc_source(int add, int omode, struct sock *sk,
struct group_source_req *pgsr);
int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
struct __kernel_sockaddr_storage *list);
int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
struct __kernel_sockaddr_storage *p);


int ac6_proc_init(struct net *net);
void ac6_proc_exit(struct net *net);
int raw6_proc_init(void);
void raw6_proc_exit(void);
int tcp6_proc_init(struct net *net);
void tcp6_proc_exit(struct net *net);
int udp6_proc_init(struct net *net);
void udp6_proc_exit(struct net *net);
int udplite6_proc_init(void);
void udplite6_proc_exit(void);
int ipv6_misc_proc_init(void);
void ipv6_misc_proc_exit(void);
int snmp6_register_dev(struct inet6_dev *idev);
int snmp6_unregister_dev(struct inet6_dev *idev);
# 1192 "./include/net/ipv6.h"
struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
struct ctl_table *ipv6_route_sysctl_init(struct net *net);
int ipv6_sysctl_register(void);
void ipv6_sysctl_unregister(void);


int ipv6_sock_mc_join(struct sock *sk, int ifindex,
const struct in6_addr *addr);
int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
const struct in6_addr *addr, unsigned int mode);
int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ip6_sock_set_v6only(struct sock *sk)
{
if (inet_sk(sk)->sk.__sk_common.skc_num)
return -22;
lock_sock(sk);
sk->__sk_common.skc_ipv6only = true;
release_sock(sk);
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip6_sock_set_recverr(struct sock *sk)
{
lock_sock(sk);
inet6_sk(sk)->recverr = true;
release_sock(sk);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __ip6_sock_set_addr_preferences(struct sock *sk, int val)
{
unsigned int pref = 0;
unsigned int prefmask = ~0;


switch (val & (0x0002 |
0x0001 |
0x0100)) {
case 0x0002:
pref |= 0x0002;
prefmask &= ~(0x0002 |
0x0001);
break;
case 0x0001:
pref |= 0x0001;
prefmask &= ~(0x0002 |
0x0001);
break;
case 0x0100:
prefmask &= ~(0x0002 |
0x0001);
break;
case 0:
break;
default:
return -22;
}


switch (val & (0x0400 | 0x0004)) {
case 0x0400:
prefmask &= ~0x0004;
break;
case 0x0004:
pref |= 0x0004;
break;
case 0:
break;
default:
return -22;
}


switch (val & (0x0008|0x0800)) {
case 0x0008:
case 0x0800:
case 0:
break;
default:
return -22;
}

inet6_sk(sk)->srcprefs = (inet6_sk(sk)->srcprefs & prefmask) | pref;
return 0;
}
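/*
 * [Editor's note -- added annotation; the names below are inferred, as the
 * preprocessor has reduced them to their values.] The hex constants match
 * the uapi IPV6_PREFER_SRC_* flags: 0x0001 TMP, 0x0002 PUBLIC, 0x0100
 * PUBTMP_DEFAULT, 0x0004 COA, 0x0400 HOME, 0x0008 CGA, 0x0800 NONCGA. Each
 * switch validates one mutually exclusive group before the result is merged
 * into inet6_sk(sk)->srcprefs under the accumulated mask.
 */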

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ip6_sock_set_addr_preferences(struct sock *sk, bool val)
{
int ret;

lock_sock(sk);
ret = __ip6_sock_set_addr_preferences(sk, val);
release_sock(sk);
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip6_sock_set_recvpktinfo(struct sock *sk)
{
lock_sock(sk);
inet6_sk(sk)->rxopt.bits.rxinfo = true;
release_sock(sk);
}
# 17 "./include/net/inetpeer.h" 2



struct ipv4_addr_key {
__be32 addr;
int vif;
};



struct inetpeer_addr {
union {
struct ipv4_addr_key a4;
struct in6_addr a6;
u32 key[(sizeof(struct in6_addr) / sizeof(u32))];
};
__u16 family;
};

struct inet_peer {
struct rb_node rb_node;
struct inetpeer_addr daddr;

u32 metrics[(__RTAX_MAX - 1)];
u32 rate_tokens;
u32 n_redirects;
unsigned long rate_last;





union {
struct {
atomic_t rid;
};
struct callback_head rcu;
};


__u32 dtime;
refcount_t refcnt;
};

struct inet_peer_base {
struct rb_root rb_root;
seqlock_t lock;
int total;
};

void inet_peer_base_init(struct inet_peer_base *);

void inet_initpeers(void) __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi")));



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip)
{
iaddr->a4.addr = ip;
iaddr->a4.vif = 0;
iaddr->family = 2;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be32 inetpeer_get_addr_v4(struct inetpeer_addr *iaddr)
{
return iaddr->a4.addr;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inetpeer_set_addr_v6(struct inetpeer_addr *iaddr,
struct in6_addr *in6)
{
iaddr->a6 = *in6;
iaddr->family = 10;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr)
{
return &iaddr->a6;
}


struct inet_peer *inet_getpeer(struct inet_peer_base *base,
const struct inetpeer_addr *daddr,
int create);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
__be32 v4daddr,
int vif, int create)
{
struct inetpeer_addr daddr;

daddr.a4.addr = v4daddr;
daddr.a4.vif = vif;
daddr.family = 2;
return inet_getpeer(base, &daddr, create);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
const struct in6_addr *v6daddr,
int create)
{
struct inetpeer_addr daddr;

daddr.a6 = *v6daddr;
daddr.family = 10;
return inet_getpeer(base, &daddr, create);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int inetpeer_addr_cmp(const struct inetpeer_addr *a,
const struct inetpeer_addr *b)
{
int i, n;

if (a->family == 2)
n = sizeof(a->a4) / sizeof(u32);
else
n = sizeof(a->a6) / sizeof(u32);

for (i = 0; i < n; i++) {
if (a->key[i] == b->key[i])
continue;
if (a->key[i] < b->key[i])
return -1;
return 1;
}

return 0;
}


void inet_putpeer(struct inet_peer *p);
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);

void inetpeer_invalidate_tree(struct inet_peer_base *);
# 22 "./include/net/ip_fib.h" 2




struct fib_config {
u8 fc_dst_len;
dscp_t fc_dscp;
u8 fc_protocol;
u8 fc_scope;
u8 fc_type;
u8 fc_gw_family;

u32 fc_table;
__be32 fc_dst;
union {
__be32 fc_gw4;
struct in6_addr fc_gw6;
};
int fc_oif;
u32 fc_flags;
u32 fc_priority;
__be32 fc_prefsrc;
u32 fc_nh_id;
struct nlattr *fc_mx;
struct rtnexthop *fc_mp;
int fc_mx_len;
int fc_mp_len;
u32 fc_flow;
u32 fc_nlflags;
struct nl_info fc_nlinfo;
struct nlattr *fc_encap;
u16 fc_encap_type;
};

struct fib_info;
struct rtable;

struct fib_nh_exception {
struct fib_nh_exception *fnhe_next;
int fnhe_genid;
__be32 fnhe_daddr;
u32 fnhe_pmtu;
bool fnhe_mtu_locked;
__be32 fnhe_gw;
unsigned long fnhe_expires;
struct rtable *fnhe_rth_input;
struct rtable *fnhe_rth_output;
unsigned long fnhe_stamp;
struct callback_head rcu;
};

struct fnhe_hash_bucket {
struct fib_nh_exception *chain;
};





struct fib_nh_common {
struct net_device *nhc_dev;
netdevice_tracker nhc_dev_tracker;
int nhc_oif;
unsigned char nhc_scope;
u8 nhc_family;
u8 nhc_gw_family;
unsigned char nhc_flags;
struct lwtunnel_state *nhc_lwtstate;

union {
__be32 ipv4;
struct in6_addr ipv6;
} nhc_gw;

int nhc_weight;
atomic_t nhc_upper_bound;


struct rtable **nhc_pcpu_rth_output;
struct rtable *nhc_rth_input;
struct fnhe_hash_bucket *nhc_exceptions;
};

struct fib_nh {
struct fib_nh_common nh_common;
struct hlist_node nh_hash;
struct fib_info *nh_parent;



__be32 nh_saddr;
int nh_saddr_genid;
# 126 "./include/net/ip_fib.h"
};





struct nexthop;

struct fib_info {
struct hlist_node fib_hash;
struct hlist_node fib_lhash;
struct list_head nh_list;
struct net *fib_net;
refcount_t fib_treeref;
refcount_t fib_clntref;
unsigned int fib_flags;
unsigned char fib_dead;
unsigned char fib_protocol;
unsigned char fib_scope;
unsigned char fib_type;
__be32 fib_prefsrc;
u32 fib_tb_id;
u32 fib_priority;
struct dst_metrics *fib_metrics;




int fib_nhs;
bool fib_nh_is_v6;
bool nh_updated;
struct nexthop *nh;
struct callback_head rcu;
struct fib_nh fib_nh[];
};






struct fib_table;
struct fib_result {
__be32 prefix;
unsigned char prefixlen;
unsigned char nh_sel;
unsigned char type;
unsigned char scope;
u32 tclassid;
struct fib_nh_common *nhc;
struct fib_info *fi;
struct fib_table *table;
struct hlist_head *fa_head;
};

struct fib_result_nl {
__be32 fl_addr;
u32 fl_mark;
unsigned char fl_tos;
unsigned char fl_scope;
unsigned char tb_id_in;

unsigned char tb_id;
unsigned char prefixlen;
unsigned char nh_sel;
unsigned char type;
unsigned char scope;
int err;
};







__be32 fib_info_update_nhc_saddr(struct net *net, struct fib_nh_common *nhc,
unsigned char scope);
__be32 fib_result_prefsrc(struct net *net, struct fib_result *res);





struct fib_rt_info {
struct fib_info *fi;
u32 tb_id;
__be32 dst;
int dst_len;
u8 tos;
u8 type;
u8 offload:1,
trap:1,
offload_failed:1,
unused:5;
};

struct fib_entry_notifier_info {
struct fib_notifier_info info;
u32 dst;
int dst_len;
struct fib_info *fi;
u8 tos;
u8 type;
u32 tb_id;
};

struct fib_nh_notifier_info {
struct fib_notifier_info info;
struct fib_nh *fib_nh;
};

int call_fib4_notifier(struct notifier_block *nb,
enum fib_event_type event_type,
struct fib_notifier_info *info);
int call_fib4_notifiers(struct net *net, enum fib_event_type event_type,
struct fib_notifier_info *info);

int fib4_notifier_init(struct net *net);
void fib4_notifier_exit(struct net *net);

void fib_info_notify_update(struct net *net, struct nl_info *info);
int fib_notify(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);

struct fib_table {
struct hlist_node tb_hlist;
u32 tb_id;
int tb_num_default;
struct callback_head rcu;
unsigned long *tb_data;
unsigned long __data[];
};

struct fib_dump_filter {
u32 table_id;

bool filter_set;
bool dump_routes;
bool dump_exceptions;
unsigned char protocol;
unsigned char rt_type;
unsigned int flags;
struct net_device *dev;
};

int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
struct fib_result *res, int fib_flags);
int fib_table_insert(struct net *, struct fib_table *, struct fib_config *,
struct netlink_ext_ack *extack);
int fib_table_delete(struct net *, struct fib_table *, struct fib_config *,
struct netlink_ext_ack *extack);
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
struct netlink_callback *cb, struct fib_dump_filter *filter);
int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
void fib_table_flush_external(struct fib_table *table);
void fib_free_table(struct fib_table *tb);






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct fib_table *fib_get_table(struct net *net, u32 id)
{
struct hlist_node *tb_hlist;
struct hlist_head *ptr;

ptr = id == RT_TABLE_LOCAL ?
&net->ipv4.fib_table_hash[(RT_TABLE_LOCAL & (2 - 1))] :
&net->ipv4.fib_table_hash[(RT_TABLE_MAIN & (2 - 1))];
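/*
 * The two one-liners below are the fully expanded
 * rcu_dereference_rtnl(hlist_first_rcu(ptr)) (a compile-time size assertion
 * plus a _Generic-based READ_ONCE) and hlist_entry(tb_hlist, struct fib_table,
 * tb_hlist), i.e. container_of(). The "(2 - 1)" masks above come from
 * FIB_TABLE_HASHSZ, which is 2 when CONFIG_IP_MULTIPLE_TABLES is unset, as it
 * appears to be in this configuration.
 */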

tb_hlist = ({ typeof(*((*((struct hlist_node **)(&(ptr)->first))))) *__UNIQUE_ID_rcu455 = (typeof(*((*((struct hlist_node **)(&(ptr)->first))))) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_456(void) ; if (!((sizeof(((*((struct hlist_node **)(&(ptr)->first))))) == sizeof(char) || sizeof(((*((struct hlist_node **)(&(ptr)->first))))) == sizeof(short) || sizeof(((*((struct hlist_node **)(&(ptr)->first))))) == sizeof(int) || sizeof(((*((struct hlist_node **)(&(ptr)->first))))) == sizeof(long)) || sizeof(((*((struct hlist_node **)(&(ptr)->first))))) == sizeof(long long))) __compiletime_assert_456(); } while (0); (*(const volatile typeof( _Generic((((*((struct hlist_node **)(&(ptr)->first))))), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (((*((struct hlist_node **)(&(ptr)->first))))))) *)&(((*((struct hlist_node **)(&(ptr)->first)))))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))); ; ((typeof(*((*((struct hlist_node **)(&(ptr)->first))))) *)(__UNIQUE_ID_rcu455)); });

return ({ void *__mptr = (void *)(tb_hlist); _Static_assert(__builtin_types_compatible_p(typeof(*(tb_hlist)), typeof(((struct fib_table *)0)->tb_hlist)) || __builtin_types_compatible_p(typeof(*(tb_hlist)), typeof(void)), "pointer type mismatch in container_of()"); ((struct fib_table *)(__mptr - __builtin_offsetof(struct fib_table, tb_hlist))); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct fib_table *fib_new_table(struct net *net, u32 id)
{
return fib_get_table(net, id);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int fib_lookup(struct net *net, const struct flowi4 *flp,
struct fib_result *res, unsigned int flags)
{
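/*
 * Single-table fib_lookup(): -101 and -11 are the expanded -ENETUNREACH and
 * -EAGAIN, and the "flags | 1" below ORs in FIB_LOOKUP_NOREF.
 */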
struct fib_table *tb;
int err = -101;

rcu_read_lock();

tb = fib_get_table(net, RT_TABLE_MAIN);
if (tb)
err = fib_table_lookup(tb, flp, res, flags | 1);

if (err == -11)
err = -101;

rcu_read_unlock();

return err;
}
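/*
 * The fib4_* helpers that follow are the stub variants compiled when policy
 * routing (CONFIG_IP_MULTIPLE_TABLES) is disabled, matching the
 * fib_new_table()/fib_get_table() definitions above.
 */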

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fib4_has_custom_rules(const struct net *net)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fib4_rule_default(const struct fib_rule *rule)
{
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int fib4_rules_dump(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int fib4_rules_seq_read(struct net *net)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fib4_rules_early_flow_dissect(struct net *net,
struct sk_buff *skb,
struct flowi4 *fl4,
struct flow_keys *flkeys)
{
return false;
}
# 432 "./include/net/ip_fib.h"
extern const struct nla_policy rtm_ipv4_policy[];
void ip_fib_init(void);
int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla,
struct netlink_ext_ack *extack);
__be32 fib_compute_spec_dst(struct sk_buff *skb);
bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev);
int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
u8 tos, int oif, struct net_device *dev,
struct in_device *idev, u32 *itag);






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int fib_num_tclassid_users(struct net *net)
{
return 0;
}

int fib_unmerge(struct net *net);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool nhc_l3mdev_matches_dev(const struct fib_nh_common *nhc,
const struct net_device *dev)
{
if (nhc->nhc_dev == dev ||
l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex)
return true;

return false;
}


int ip_fib_check_default(__be32 gw, struct net_device *dev);
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
int fib_sync_down_addr(struct net_device *dev, __be32 local);
int fib_sync_up(struct net_device *dev, unsigned char nh_flags);
void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig);
# 519 "./include/net/ip_fib.h"
int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope,
struct netlink_ext_ack *extack);
void fib_select_multipath(struct fib_result *res, int hash);
void fib_select_path(struct net *net, struct fib_result *res,
struct flowi4 *fl4, const struct sk_buff *skb);

int fib_nh_init(struct net *net, struct fib_nh *fib_nh,
struct fib_config *cfg, int nh_weight,
struct netlink_ext_ack *extack);
void fib_nh_release(struct net *net, struct fib_nh *fib_nh);
int fib_nh_common_init(struct net *net, struct fib_nh_common *nhc,
struct nlattr *fc_encap, u16 fc_encap_type,
void *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
void fib_nh_common_release(struct fib_nh_common *nhc);


void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri);
void fib_trie_init(void);
struct fib_table *fib_trie_table(u32 id, struct fib_table *alias);
bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags,
const struct flowi4 *flp);
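/*
 * fib_combine_itag() below is left with an empty body: its
 * CONFIG_IP_ROUTE_CLASSID contents were preprocessed away (note the line
 * marker jumping to ip_fib.h:565 inside the braces).
 */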

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fib_combine_itag(u32 *itag, const struct fib_result *res)
{
# 565 "./include/net/ip_fib.h"
}

void fib_flush(struct net *net);
void free_fib_info(struct fib_info *fi);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fib_info_hold(struct fib_info *fi)
{
refcount_inc(&fi->fib_clntref);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fib_info_put(struct fib_info *fi)
{
if (refcount_dec_and_test(&fi->fib_clntref))
free_fib_info(fi);
}


int fib_proc_init(struct net *net);
void fib_proc_exit(struct net *net);
# 594 "./include/net/ip_fib.h"
u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr);

int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
struct fib_dump_filter *filter,
struct netlink_callback *cb);

int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
u8 rt_family, unsigned char *flags, bool skip_oif);
int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
int nh_weight, u8 rt_family, u32 nh_tclassid);
# 11 "./include/linux/mroute_base.h" 2
# 28 "./include/linux/mroute_base.h"
struct vif_device {
struct net_device *dev;
netdevice_tracker dev_tracker;
unsigned long bytes_in, bytes_out;
unsigned long pkt_in, pkt_out;
unsigned long rate_limit;
unsigned char threshold;
unsigned short flags;
int link;


struct netdev_phys_item_id dev_parent_id;
__be32 local, remote;
};

struct vif_entry_notifier_info {
struct fib_notifier_info info;
struct net_device *dev;
unsigned short vif_index;
unsigned short vif_flags;
u32 tb_id;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mr_call_vif_notifier(struct notifier_block *nb,
unsigned short family,
enum fib_event_type event_type,
struct vif_device *vif,
unsigned short vif_index, u32 tb_id,
struct netlink_ext_ack *extack)
{
struct vif_entry_notifier_info info = {
.info = {
.family = family,
.extack = extack,
},
.dev = vif->dev,
.vif_index = vif_index,
.vif_flags = vif->flags,
.tb_id = tb_id,
};

return call_fib_notifier(nb, event_type, &info.info);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mr_call_vif_notifiers(struct net *net,
unsigned short family,
enum fib_event_type event_type,
struct vif_device *vif,
unsigned short vif_index, u32 tb_id,
unsigned int *ipmr_seq)
{
struct vif_entry_notifier_info info = {
.info = {
.family = family,
},
.dev = vif->dev,
.vif_index = vif_index,
.vif_flags = vif->flags,
.tb_id = tb_id,
};
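/*
 * The one-liner below is the fully expanded ASSERT_RTNL(): a WARN_ONCE on
 * !rtnl_is_locked(), built from a RISC-V "ebreak" trap plus a __bug_table
 * entry.
 */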

({ static bool __attribute__((__section__(".data.once"))) __already_done; bool __ret_do_once = !!(!rtnl_is_locked()); if (__builtin_expect(!!(__ret_do_once && !__already_done), 0)) { __already_done = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("RTNL: assertion failed at %s (%d)\n", "include/linux/mroute_base.h", 89); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/mroute_base.h"), "i" (89), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_do_once), 0); });
(*ipmr_seq)++;
return call_fib_notifiers(net, event_type, &info.info);
}
# 107 "./include/linux/mroute_base.h"
enum {
MFC_STATIC = ((((1UL))) << (0)),
MFC_OFFLOAD = ((((1UL))) << (1)),
};
# 132 "./include/linux/mroute_base.h"
struct mr_mfc {
struct rhlist_head mnode;
unsigned short mfc_parent;
int mfc_flags;

union {
struct {
unsigned long expires;
struct sk_buff_head unresolved;
} unres;
struct {
unsigned long last_assert;
int minvif;
int maxvif;
unsigned long bytes;
unsigned long pkt;
unsigned long wrong_if;
unsigned long lastuse;
unsigned char ttls[32];
refcount_t refcount;
} res;
} mfc_un;
struct list_head list;
struct callback_head rcu;
void (*free)(struct callback_head *head);
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mr_cache_put(struct mr_mfc *c)
{
if (refcount_dec_and_test(&c->mfc_un.res.refcount))
call_rcu(&c->rcu, c->free);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mr_cache_hold(struct mr_mfc *c)
{
refcount_inc(&c->mfc_un.res.refcount);
}

struct mfc_entry_notifier_info {
struct fib_notifier_info info;
struct mr_mfc *mfc;
u32 tb_id;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mr_call_mfc_notifier(struct notifier_block *nb,
unsigned short family,
enum fib_event_type event_type,
struct mr_mfc *mfc, u32 tb_id,
struct netlink_ext_ack *extack)
{
struct mfc_entry_notifier_info info = {
.info = {
.family = family,
.extack = extack,
},
.mfc = mfc,
.tb_id = tb_id
};

return call_fib_notifier(nb, event_type, &info.info);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mr_call_mfc_notifiers(struct net *net,
unsigned short family,
enum fib_event_type event_type,
struct mr_mfc *mfc, u32 tb_id,
unsigned int *ipmr_seq)
{
struct mfc_entry_notifier_info info = {
.info = {
.family = family,
},
.mfc = mfc,
.tb_id = tb_id
};
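/* Same expanded ASSERT_RTNL() as in mr_call_vif_notifiers() above. */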

({ static bool __attribute__((__section__(".data.once"))) __already_done; bool __ret_do_once = !!(!rtnl_is_locked()); if (__builtin_expect(!!(__ret_do_once && !__already_done), 0)) { __already_done = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("RTNL: assertion failed at %s (%d)\n", "include/linux/mroute_base.h", 208); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/mroute_base.h"), "i" (208), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_do_once), 0); });
(*ipmr_seq)++;
return call_fib_notifiers(net, event_type, &info.info);
}

struct mr_table;






struct mr_table_ops {
const struct rhashtable_params *rht_params;
void *cmparg_any;
};
# 243 "./include/linux/mroute_base.h"
struct mr_table {
struct list_head list;
possible_net_t net;
struct mr_table_ops ops;
u32 id;
struct sock *mroute_sk;
struct timer_list ipmr_expire_timer;
struct list_head mfc_unres_queue;
struct vif_device vif_table[32];
struct rhltable mfc_hash;
struct list_head mfc_cache_list;
int maxvif;
atomic_t cache_resolve_queue_len;
bool mroute_do_assert;
bool mroute_do_pim;
bool mroute_do_wrvifwhole;
int mroute_reg_vif_num;
};
# 310 "./include/linux/mroute_base.h"
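/*
 * The stubs from here down (empty bodies, ((void *)0) for the expanded NULL,
 * -22 for the expanded -EINVAL) are the variants built when multicast
 * routing support is compiled out, consistent with this configuration.
 */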
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vif_device_init(struct vif_device *v,
struct net_device *dev,
unsigned long rate_limit,
unsigned char threshold,
unsigned short flags,
unsigned short get_iflink_mask)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *mr_mfc_find_parent(struct mr_table *mrt,
void *hasharg, int parent)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *mr_mfc_find_any_parent(struct mr_table *mrt,
int vifi)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct mr_mfc *mr_mfc_find_any(struct mr_table *mrt,
int vifi, void *hasharg)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
struct mr_mfc *c, struct rtmsg *rtm)
{
return -22;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
struct mr_table *(*iter)(struct net *net,
struct mr_table *mrt),
int (*fill)(struct mr_table *mrt,
struct sk_buff *skb,
u32 portid, u32 seq, struct mr_mfc *c,
int cmd, int flags),
spinlock_t *lock, struct fib_dump_filter *filter)
{
return -22;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mr_dump(struct net *net, struct notifier_block *nb,
unsigned short family,
int (*rules_dump)(struct net *net,
struct notifier_block *nb,
struct netlink_ext_ack *extack),
struct mr_table *(*mr_iter)(struct net *net,
struct mr_table *mrt),
rwlock_t *mrt_lock, struct netlink_ext_ack *extack)
{
return -22;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *mr_mfc_find(struct mr_table *mrt, void *hasharg)
{
return mr_mfc_find_parent(mrt, hasharg, -1);
}


struct mr_vif_iter {
struct seq_net_private p;
struct mr_table *mrt;
int ct;
};

struct mr_mfc_iter {
struct seq_net_private p;
struct mr_table *mrt;
struct list_head *cache;


spinlock_t *lock;
};
# 434 "./include/linux/mroute_base.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter,
loff_t pos)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *mr_vif_seq_next(struct seq_file *seq,
void *v, loff_t *pos)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *mr_mfc_seq_idx(struct net *net,
struct mr_mfc_iter *it, loff_t pos)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *mr_mfc_seq_next(struct seq_file *seq, void *v,
loff_t *pos)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos,
struct mr_table *mrt, spinlock_t *lock)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mr_mfc_seq_stop(struct seq_file *seq, void *v)
{
}
# 11 "./include/linux/mroute6.h" 2
# 20 "./include/linux/mroute6.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ip6_mroute_opt(int opt)
{
return 0;
}


struct sock;
# 37 "./include/linux/mroute6.h"
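/*
 * In the ip6mr stubs below, -92 is the expanded -ENOPROTOOPT and -515 the
 * expanded -ENOIOCTLCMD (an in-kernel errno that never reaches user space).
 */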
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ip6_mroute_setsockopt(struct sock *sock, int optname,
sockptr_t optval, unsigned int optlen)
{
return -92;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
int ip6_mroute_getsockopt(struct sock *sock,
int optname, char *optval, int *optlen)
{
return -92;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
int ip6mr_ioctl(struct sock *sk, int cmd, void *arg)
{
return -515;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ip6_mr_init(void)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip6_mr_cleanup(void)
{
return;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ip6mr_rule_default(const struct fib_rule *rule)
{
return true;
}




struct mfc6_cache_cmp_arg {
struct in6_addr mf6c_mcastgrp;
struct in6_addr mf6c_origin;
};

struct mfc6_cache {
struct mr_mfc _c;
union {
struct {
struct in6_addr mf6c_mcastgrp;
struct in6_addr mf6c_origin;
};
struct mfc6_cache_cmp_arg cmparg;
};
};



struct rtmsg;
extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
struct rtmsg *rtm, u32 portid);





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
{
return false;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ip6mr_sk_done(struct sock *sk)
{
return 0;
}
# 37 "net/ipv6/route.c" 2

# 1 "./include/linux/if_arp.h" 1
# 23 "./include/linux/if_arp.h"
# 1 "./include/uapi/linux/if_arp.h" 1
# 117 "./include/uapi/linux/if_arp.h"
struct arpreq {
struct sockaddr arp_pa;
struct sockaddr arp_ha;
int arp_flags;
struct sockaddr arp_netmask;
char arp_dev[16];
};

struct arpreq_old {
struct sockaddr arp_pa;
struct sockaddr arp_ha;
int arp_flags;
struct sockaddr arp_netmask;
};
# 145 "./include/uapi/linux/if_arp.h"
struct arphdr {
__be16 ar_hrd;
__be16 ar_pro;
unsigned char ar_hln;
unsigned char ar_pln;
__be16 ar_op;
# 162 "./include/uapi/linux/if_arp.h"
};
# 24 "./include/linux/if_arp.h" 2

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct arphdr *arp_hdr(const struct sk_buff *skb)
{
return (struct arphdr *)skb_network_header(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int arp_hdr_len(const struct net_device *dev)
{
switch (dev->type) {





default:

return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2;
}
}
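/*
 * The numeric case labels in dev_is_mac_header_xmit() are expanded ARPHRD_*
 * device types: 768 TUNNEL, 769 TUNNEL6, 776 SIT, 778 IPGRE, 823 IP6GRE,
 * 0xFFFF VOID, 0xFFFE NONE, 519 RAWIP, 779 PIMREG.
 */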

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool dev_is_mac_header_xmit(const struct net_device *dev)
{
switch (dev->type) {
case 768:
case 769:
case 776:
case 778:
case 823:
case 0xFFFF:
case 0xFFFE:
case 519:
case 779:
return false;
default:
return true;
}
}
# 39 "net/ipv6/route.c" 2
# 1 "./include/linux/proc_fs.h" 1
# 12 "./include/linux/proc_fs.h"
struct proc_dir_entry;
struct seq_file;
struct seq_operations;

enum {
# 25 "./include/linux/proc_fs.h"
PROC_ENTRY_PERMANENT = 1U << 0,

};

struct proc_ops {
unsigned int proc_flags;
int (*proc_open)(struct inode *, struct file *);
ssize_t (*proc_read)(struct file *, char *, size_t, loff_t *);
ssize_t (*proc_read_iter)(struct kiocb *, struct iov_iter *);
ssize_t (*proc_write)(struct file *, const char *, size_t, loff_t *);

loff_t (*proc_lseek)(struct file *, loff_t, int);
int (*proc_release)(struct inode *, struct file *);
__poll_t (*proc_poll)(struct file *, struct poll_table_struct *);
long (*proc_ioctl)(struct file *, unsigned int, unsigned long);



int (*proc_mmap)(struct file *, struct vm_area_struct *);
unsigned long (*proc_get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
};
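/*
 * struct proc_ops is declared __randomize_layout in the source; the
 * attribute expands to nothing in this configuration, leaving the bare
 * closing brace above.
 */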


enum proc_hidepid {
HIDEPID_OFF = 0,
HIDEPID_NO_ACCESS = 1,
HIDEPID_INVISIBLE = 2,
HIDEPID_NOT_PTRACEABLE = 4,
};


enum proc_pidonly {
PROC_PIDONLY_OFF = 0,
PROC_PIDONLY_ON = 1,
};

struct proc_fs_info {
struct pid_namespace *pid_ns;
struct dentry *proc_self;
struct dentry *proc_thread_self;
kgid_t pid_gid;
enum proc_hidepid hide_pid;
enum proc_pidonly pidonly;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct proc_fs_info *proc_sb_info(struct super_block *sb)
{
return sb->s_fs_info;
}



typedef int (*proc_write_t)(struct file *, char *, size_t);

extern void proc_root_init(void);
extern void proc_flush_pid(struct pid *);

extern struct proc_dir_entry *proc_symlink(const char *,
struct proc_dir_entry *, const char *);
struct proc_dir_entry *_proc_mkdir(const char *, umode_t, struct proc_dir_entry *, void *, bool);
extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
struct proc_dir_entry *, void *);
extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
struct proc_dir_entry *);
struct proc_dir_entry *proc_create_mount_point(const char *name);

struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
struct proc_dir_entry *parent, const struct seq_operations *ops,
unsigned int state_size, void *data);




struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode,
struct proc_dir_entry *parent,
int (*show)(struct seq_file *, void *), void *data);



extern struct proc_dir_entry *proc_create_data(const char *, umode_t,
struct proc_dir_entry *,
const struct proc_ops *,
void *);

struct proc_dir_entry *proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct proc_ops *proc_ops);
extern void proc_set_size(struct proc_dir_entry *, loff_t);
extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *pde_data(const struct inode *inode)
{
return inode->i_private;
}

extern void *proc_get_parent_data(const struct inode *);
extern void proc_remove(struct proc_dir_entry *);
extern void remove_proc_entry(const char *, struct proc_dir_entry *);
extern int remove_proc_subtree(const char *, struct proc_dir_entry *);

struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
struct proc_dir_entry *parent, const struct seq_operations *ops,
unsigned int state_size, void *data);


struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
struct proc_dir_entry *parent,
int (*show)(struct seq_file *, void *), void *data);
struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode,
struct proc_dir_entry *parent,
const struct seq_operations *ops,
proc_write_t write,
unsigned int state_size, void *data);
struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mode,
struct proc_dir_entry *parent,
int (*show)(struct seq_file *, void *),
proc_write_t write,
void *data);
extern struct pid *tgid_pidfd_to_pid(const struct file *file);

struct bpf_iter_aux_info;
extern int bpf_iter_init_seq_net(void *priv_data, struct bpf_iter_aux_info *aux);
extern void bpf_iter_fini_seq_net(void *priv_data);
# 221 "./include/linux/proc_fs.h"
struct net;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct proc_dir_entry *proc_net_mkdir(
struct net *net, const char *name, struct proc_dir_entry *parent)
{
return _proc_mkdir(name, 0, parent, net, true);
}

struct ns_common;
int open_related_ns(struct ns_common *ns,
struct ns_common *(*get_ns)(struct ns_common *ns));


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct pid_namespace *proc_pid_ns(struct super_block *sb)
{
return proc_sb_info(sb)->pid_ns;
}

bool proc_ns_file(const struct file *file);
# 40 "net/ipv6/route.c" 2








# 1 "./include/net/ip6_fib.h" 1
# 12 "./include/net/ip6_fib.h"
# 1 "./include/linux/ipv6_route.h" 1
# 11 "./include/linux/ipv6_route.h"
# 1 "./include/uapi/linux/ipv6_route.h" 1
# 43 "./include/uapi/linux/ipv6_route.h"
struct in6_rtmsg {
struct in6_addr rtmsg_dst;
struct in6_addr rtmsg_src;
struct in6_addr rtmsg_gateway;
__u32 rtmsg_type;
__u16 rtmsg_dst_len;
__u16 rtmsg_src_len;
__u32 rtmsg_metric;
unsigned long rtmsg_info;
__u32 rtmsg_flags;
int rtmsg_ifindex;
};
# 12 "./include/linux/ipv6_route.h" 2
# 13 "./include/net/ip6_fib.h" 2
# 23 "./include/net/ip6_fib.h"
# 1 "./include/uapi/linux/bpf.h" 1
# 12 "./include/uapi/linux/bpf.h"
# 1 "./include/uapi/linux/bpf_common.h" 1
# 13 "./include/uapi/linux/bpf.h" 2
# 53 "./include/uapi/linux/bpf.h"
enum {
BPF_REG_0 = 0,
BPF_REG_1,
BPF_REG_2,
BPF_REG_3,
BPF_REG_4,
BPF_REG_5,
BPF_REG_6,
BPF_REG_7,
BPF_REG_8,
BPF_REG_9,
BPF_REG_10,
__MAX_BPF_REG,
};




struct bpf_insn {
__u8 code;
__u8 dst_reg:4;
__u8 src_reg:4;
__s16 off;
__s32 imm;
};


struct bpf_lpm_trie_key {
__u32 prefixlen;
__u8 data[0];
};

struct bpf_cgroup_storage_key {
__u64 cgroup_inode_id;
__u32 attach_type;
};

union bpf_iter_link_info {
struct {
__u32 map_fd;
} map;
};
# 840 "./include/uapi/linux/bpf.h"
enum bpf_cmd {
BPF_MAP_CREATE,
BPF_MAP_LOOKUP_ELEM,
BPF_MAP_UPDATE_ELEM,
BPF_MAP_DELETE_ELEM,
BPF_MAP_GET_NEXT_KEY,
BPF_PROG_LOAD,
BPF_OBJ_PIN,
BPF_OBJ_GET,
BPF_PROG_ATTACH,
BPF_PROG_DETACH,
BPF_PROG_TEST_RUN,
BPF_PROG_RUN = BPF_PROG_TEST_RUN,
BPF_PROG_GET_NEXT_ID,
BPF_MAP_GET_NEXT_ID,
BPF_PROG_GET_FD_BY_ID,
BPF_MAP_GET_FD_BY_ID,
BPF_OBJ_GET_INFO_BY_FD,
BPF_PROG_QUERY,
BPF_RAW_TRACEPOINT_OPEN,
BPF_BTF_LOAD,
BPF_BTF_GET_FD_BY_ID,
BPF_TASK_FD_QUERY,
BPF_MAP_LOOKUP_AND_DELETE_ELEM,
BPF_MAP_FREEZE,
BPF_BTF_GET_NEXT_ID,
BPF_MAP_LOOKUP_BATCH,
BPF_MAP_LOOKUP_AND_DELETE_BATCH,
BPF_MAP_UPDATE_BATCH,
BPF_MAP_DELETE_BATCH,
BPF_LINK_CREATE,
BPF_LINK_UPDATE,
BPF_LINK_GET_FD_BY_ID,
BPF_LINK_GET_NEXT_ID,
BPF_ENABLE_STATS,
BPF_ITER_CREATE,
BPF_LINK_DETACH,
BPF_PROG_BIND_MAP,
};

enum bpf_map_type {
BPF_MAP_TYPE_UNSPEC,
BPF_MAP_TYPE_HASH,
BPF_MAP_TYPE_ARRAY,
BPF_MAP_TYPE_PROG_ARRAY,
BPF_MAP_TYPE_PERF_EVENT_ARRAY,
BPF_MAP_TYPE_PERCPU_HASH,
BPF_MAP_TYPE_PERCPU_ARRAY,
BPF_MAP_TYPE_STACK_TRACE,
BPF_MAP_TYPE_CGROUP_ARRAY,
BPF_MAP_TYPE_LRU_HASH,
BPF_MAP_TYPE_LRU_PERCPU_HASH,
BPF_MAP_TYPE_LPM_TRIE,
BPF_MAP_TYPE_ARRAY_OF_MAPS,
BPF_MAP_TYPE_HASH_OF_MAPS,
BPF_MAP_TYPE_DEVMAP,
BPF_MAP_TYPE_SOCKMAP,
BPF_MAP_TYPE_CPUMAP,
BPF_MAP_TYPE_XSKMAP,
BPF_MAP_TYPE_SOCKHASH,
BPF_MAP_TYPE_CGROUP_STORAGE,
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
BPF_MAP_TYPE_QUEUE,
BPF_MAP_TYPE_STACK,
BPF_MAP_TYPE_SK_STORAGE,
BPF_MAP_TYPE_DEVMAP_HASH,
BPF_MAP_TYPE_STRUCT_OPS,
BPF_MAP_TYPE_RINGBUF,
BPF_MAP_TYPE_INODE_STORAGE,
BPF_MAP_TYPE_TASK_STORAGE,
BPF_MAP_TYPE_BLOOM_FILTER,
};
# 922 "./include/uapi/linux/bpf.h"
enum bpf_prog_type {
BPF_PROG_TYPE_UNSPEC,
BPF_PROG_TYPE_SOCKET_FILTER,
BPF_PROG_TYPE_KPROBE,
BPF_PROG_TYPE_SCHED_CLS,
BPF_PROG_TYPE_SCHED_ACT,
BPF_PROG_TYPE_TRACEPOINT,
BPF_PROG_TYPE_XDP,
BPF_PROG_TYPE_PERF_EVENT,
BPF_PROG_TYPE_CGROUP_SKB,
BPF_PROG_TYPE_CGROUP_SOCK,
BPF_PROG_TYPE_LWT_IN,
BPF_PROG_TYPE_LWT_OUT,
BPF_PROG_TYPE_LWT_XMIT,
BPF_PROG_TYPE_SOCK_OPS,
BPF_PROG_TYPE_SK_SKB,
BPF_PROG_TYPE_CGROUP_DEVICE,
BPF_PROG_TYPE_SK_MSG,
BPF_PROG_TYPE_RAW_TRACEPOINT,
BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
BPF_PROG_TYPE_LWT_SEG6LOCAL,
BPF_PROG_TYPE_LIRC_MODE2,
BPF_PROG_TYPE_SK_REUSEPORT,
BPF_PROG_TYPE_FLOW_DISSECTOR,
BPF_PROG_TYPE_CGROUP_SYSCTL,
BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
BPF_PROG_TYPE_CGROUP_SOCKOPT,
BPF_PROG_TYPE_TRACING,
BPF_PROG_TYPE_STRUCT_OPS,
BPF_PROG_TYPE_EXT,
BPF_PROG_TYPE_LSM,
BPF_PROG_TYPE_SK_LOOKUP,
BPF_PROG_TYPE_SYSCALL,
};

enum bpf_attach_type {
BPF_CGROUP_INET_INGRESS,
BPF_CGROUP_INET_EGRESS,
BPF_CGROUP_INET_SOCK_CREATE,
BPF_CGROUP_SOCK_OPS,
BPF_SK_SKB_STREAM_PARSER,
BPF_SK_SKB_STREAM_VERDICT,
BPF_CGROUP_DEVICE,
BPF_SK_MSG_VERDICT,
BPF_CGROUP_INET4_BIND,
BPF_CGROUP_INET6_BIND,
BPF_CGROUP_INET4_CONNECT,
BPF_CGROUP_INET6_CONNECT,
BPF_CGROUP_INET4_POST_BIND,
BPF_CGROUP_INET6_POST_BIND,
BPF_CGROUP_UDP4_SENDMSG,
BPF_CGROUP_UDP6_SENDMSG,
BPF_LIRC_MODE2,
BPF_FLOW_DISSECTOR,
BPF_CGROUP_SYSCTL,
BPF_CGROUP_UDP4_RECVMSG,
BPF_CGROUP_UDP6_RECVMSG,
BPF_CGROUP_GETSOCKOPT,
BPF_CGROUP_SETSOCKOPT,
BPF_TRACE_RAW_TP,
BPF_TRACE_FENTRY,
BPF_TRACE_FEXIT,
BPF_MODIFY_RETURN,
BPF_LSM_MAC,
BPF_TRACE_ITER,
BPF_CGROUP_INET4_GETPEERNAME,
BPF_CGROUP_INET6_GETPEERNAME,
BPF_CGROUP_INET4_GETSOCKNAME,
BPF_CGROUP_INET6_GETSOCKNAME,
BPF_XDP_DEVMAP,
BPF_CGROUP_INET_SOCK_RELEASE,
BPF_XDP_CPUMAP,
BPF_SK_LOOKUP,
BPF_XDP,
BPF_SK_SKB_VERDICT,
BPF_SK_REUSEPORT_SELECT,
BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
BPF_PERF_EVENT,
BPF_TRACE_KPROBE_MULTI,
__MAX_BPF_ATTACH_TYPE
};



enum bpf_link_type {
BPF_LINK_TYPE_UNSPEC = 0,
BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
BPF_LINK_TYPE_TRACING = 2,
BPF_LINK_TYPE_CGROUP = 3,
BPF_LINK_TYPE_ITER = 4,
BPF_LINK_TYPE_NETNS = 5,
BPF_LINK_TYPE_XDP = 6,
BPF_LINK_TYPE_PERF_EVENT = 7,
BPF_LINK_TYPE_KPROBE_MULTI = 8,

MAX_BPF_LINK_TYPE,
};
# 1183 "./include/uapi/linux/bpf.h"
enum {
BPF_ANY = 0,
BPF_NOEXIST = 1,
BPF_EXIST = 2,
BPF_F_LOCK = 4,
};


enum {
BPF_F_NO_PREALLOC = (1U << 0),






BPF_F_NO_COMMON_LRU = (1U << 1),

BPF_F_NUMA_NODE = (1U << 2),


BPF_F_RDONLY = (1U << 3),
BPF_F_WRONLY = (1U << 4),


BPF_F_STACK_BUILD_ID = (1U << 5),


BPF_F_ZERO_SEED = (1U << 6),


BPF_F_RDONLY_PROG = (1U << 7),
BPF_F_WRONLY_PROG = (1U << 8),


BPF_F_CLONE = (1U << 9),


BPF_F_MMAPABLE = (1U << 10),


BPF_F_PRESERVE_ELEMS = (1U << 11),


BPF_F_INNER_MAP = (1U << 12),
};
# 1246 "./include/uapi/linux/bpf.h"
enum bpf_stats_type {

BPF_STATS_RUN_TIME = 0,
};

enum bpf_stack_build_id_status {

BPF_STACK_BUILD_ID_EMPTY = 0,

BPF_STACK_BUILD_ID_VALID = 1,

BPF_STACK_BUILD_ID_IP = 2,
};


struct bpf_stack_build_id {
__s32 status;
unsigned char build_id[20];
union {
__u64 offset;
__u64 ip;
};
};



union bpf_attr {
struct {
__u32 map_type;
__u32 key_size;
__u32 value_size;
__u32 max_entries;
__u32 map_flags;


__u32 inner_map_fd;
__u32 numa_node;


char map_name[16U];
__u32 map_ifindex;
__u32 btf_fd;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
__u32 btf_vmlinux_value_type_id;
# 1300 "./include/uapi/linux/bpf.h"
__u64 map_extra;
};

struct {
__u32 map_fd;
__u64 __attribute__((aligned(8))) key;
union {
__u64 __attribute__((aligned(8))) value;
__u64 __attribute__((aligned(8))) next_key;
};
__u64 flags;
};

struct {
__u64 __attribute__((aligned(8))) in_batch;


__u64 __attribute__((aligned(8))) out_batch;
__u64 __attribute__((aligned(8))) keys;
__u64 __attribute__((aligned(8))) values;
__u32 count;




__u32 map_fd;
__u64 elem_flags;
__u64 flags;
} batch;

struct {
__u32 prog_type;
__u32 insn_cnt;
__u64 __attribute__((aligned(8))) insns;
__u64 __attribute__((aligned(8))) license;
__u32 log_level;
__u32 log_size;
__u64 __attribute__((aligned(8))) log_buf;
__u32 kern_version;
__u32 prog_flags;
char prog_name[16U];
__u32 prog_ifindex;




__u32 expected_attach_type;
__u32 prog_btf_fd;
__u32 func_info_rec_size;
__u64 __attribute__((aligned(8))) func_info;
__u32 func_info_cnt;
__u32 line_info_rec_size;
__u64 __attribute__((aligned(8))) line_info;
__u32 line_info_cnt;
__u32 attach_btf_id;
union {

__u32 attach_prog_fd;

__u32 attach_btf_obj_fd;
};
__u32 core_relo_cnt;
__u64 __attribute__((aligned(8))) fd_array;
__u64 __attribute__((aligned(8))) core_relos;
__u32 core_relo_rec_size;
};

struct {
__u64 __attribute__((aligned(8))) pathname;
__u32 bpf_fd;
__u32 file_flags;
};

struct {
__u32 target_fd;
__u32 attach_bpf_fd;
__u32 attach_type;
__u32 attach_flags;
__u32 replace_bpf_fd;



};

struct {
__u32 prog_fd;
__u32 retval;
__u32 data_size_in;
__u32 data_size_out;



__u64 __attribute__((aligned(8))) data_in;
__u64 __attribute__((aligned(8))) data_out;
__u32 repeat;
__u32 duration;
__u32 ctx_size_in;
__u32 ctx_size_out;



__u64 __attribute__((aligned(8))) ctx_in;
__u64 __attribute__((aligned(8))) ctx_out;
__u32 flags;
__u32 cpu;
__u32 batch_size;
} test;

struct {
union {
__u32 start_id;
__u32 prog_id;
__u32 map_id;
__u32 btf_id;
__u32 link_id;
};
__u32 next_id;
__u32 open_flags;
};

struct {
__u32 bpf_fd;
__u32 info_len;
__u64 __attribute__((aligned(8))) info;
} info;

struct {
__u32 target_fd;
__u32 attach_type;
__u32 query_flags;
__u32 attach_flags;
__u64 __attribute__((aligned(8))) prog_ids;
__u32 prog_cnt;
} query;

struct {
__u64 name;
__u32 prog_fd;
} raw_tracepoint;

struct {
__u64 __attribute__((aligned(8))) btf;
__u64 __attribute__((aligned(8))) btf_log_buf;
__u32 btf_size;
__u32 btf_log_size;
__u32 btf_log_level;
};

struct {
__u32 pid;
__u32 fd;
__u32 flags;
__u32 buf_len;
__u64 __attribute__((aligned(8))) buf;




__u32 prog_id;
__u32 fd_type;
__u64 probe_offset;
__u64 probe_addr;
} task_fd_query;

struct {
__u32 prog_fd;
union {
__u32 target_fd;
__u32 target_ifindex;
};
__u32 attach_type;
__u32 flags;
union {
__u32 target_btf_id;
struct {
__u64 __attribute__((aligned(8))) iter_info;
__u32 iter_info_len;
};
struct {




__u64 bpf_cookie;
} perf_event;
struct {
__u32 flags;
__u32 cnt;
__u64 __attribute__((aligned(8))) syms;
__u64 __attribute__((aligned(8))) addrs;
__u64 __attribute__((aligned(8))) cookies;
} kprobe_multi;
};
} link_create;

struct {
__u32 link_fd;

__u32 new_prog_fd;
__u32 flags;


__u32 old_prog_fd;
} link_update;

struct {
__u32 link_fd;
} link_detach;

struct {
__u32 type;
} enable_stats;

struct {
__u32 link_fd;
__u32 flags;
} iter_create;

struct {
__u32 prog_fd;
__u32 map_fd;
__u32 flags;
} prog_bind_map;

} __attribute__((aligned(8)));
# 5348 "./include/uapi/linux/bpf.h"
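/*
 * enum bpf_func_id is generated by the __BPF_FUNC_MAPPER() x-macro in bpf.h;
 * preprocessing flattens the whole helper list onto the two long lines below.
 */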
enum bpf_func_id {
BPF_FUNC_unspec, BPF_FUNC_map_lookup_elem, BPF_FUNC_map_update_elem, BPF_FUNC_map_delete_elem, BPF_FUNC_probe_read, BPF_FUNC_ktime_get_ns, BPF_FUNC_trace_printk, BPF_FUNC_get_prandom_u32, BPF_FUNC_get_smp_processor_id, BPF_FUNC_skb_store_bytes, BPF_FUNC_l3_csum_replace, BPF_FUNC_l4_csum_replace, BPF_FUNC_tail_call, BPF_FUNC_clone_redirect, BPF_FUNC_get_current_pid_tgid, BPF_FUNC_get_current_uid_gid, BPF_FUNC_get_current_comm, BPF_FUNC_get_cgroup_classid, BPF_FUNC_skb_vlan_push, BPF_FUNC_skb_vlan_pop, BPF_FUNC_skb_get_tunnel_key, BPF_FUNC_skb_set_tunnel_key, BPF_FUNC_perf_event_read, BPF_FUNC_redirect, BPF_FUNC_get_route_realm, BPF_FUNC_perf_event_output, BPF_FUNC_skb_load_bytes, BPF_FUNC_get_stackid, BPF_FUNC_csum_diff, BPF_FUNC_skb_get_tunnel_opt, BPF_FUNC_skb_set_tunnel_opt, BPF_FUNC_skb_change_proto, BPF_FUNC_skb_change_type, BPF_FUNC_skb_under_cgroup, BPF_FUNC_get_hash_recalc, BPF_FUNC_get_current_task, BPF_FUNC_probe_write_user, BPF_FUNC_current_task_under_cgroup, BPF_FUNC_skb_change_tail, BPF_FUNC_skb_pull_data, BPF_FUNC_csum_update, BPF_FUNC_set_hash_invalid, BPF_FUNC_get_numa_node_id, BPF_FUNC_skb_change_head, BPF_FUNC_xdp_adjust_head, BPF_FUNC_probe_read_str, BPF_FUNC_get_socket_cookie, BPF_FUNC_get_socket_uid, BPF_FUNC_set_hash, BPF_FUNC_setsockopt, BPF_FUNC_skb_adjust_room, BPF_FUNC_redirect_map, BPF_FUNC_sk_redirect_map, BPF_FUNC_sock_map_update, BPF_FUNC_xdp_adjust_meta, BPF_FUNC_perf_event_read_value, BPF_FUNC_perf_prog_read_value, BPF_FUNC_getsockopt, BPF_FUNC_override_return, BPF_FUNC_sock_ops_cb_flags_set, BPF_FUNC_msg_redirect_map, BPF_FUNC_msg_apply_bytes, BPF_FUNC_msg_cork_bytes, BPF_FUNC_msg_pull_data, BPF_FUNC_bind, BPF_FUNC_xdp_adjust_tail, BPF_FUNC_skb_get_xfrm_state, BPF_FUNC_get_stack, BPF_FUNC_skb_load_bytes_relative, BPF_FUNC_fib_lookup, BPF_FUNC_sock_hash_update, BPF_FUNC_msg_redirect_hash, BPF_FUNC_sk_redirect_hash, BPF_FUNC_lwt_push_encap, BPF_FUNC_lwt_seg6_store_bytes, BPF_FUNC_lwt_seg6_adjust_srh, BPF_FUNC_lwt_seg6_action, BPF_FUNC_rc_repeat, BPF_FUNC_rc_keydown, BPF_FUNC_skb_cgroup_id, BPF_FUNC_get_current_cgroup_id, BPF_FUNC_get_local_storage, BPF_FUNC_sk_select_reuseport, BPF_FUNC_skb_ancestor_cgroup_id, BPF_FUNC_sk_lookup_tcp, BPF_FUNC_sk_lookup_udp, BPF_FUNC_sk_release, BPF_FUNC_map_push_elem, BPF_FUNC_map_pop_elem, BPF_FUNC_map_peek_elem, BPF_FUNC_msg_push_data, BPF_FUNC_msg_pop_data, BPF_FUNC_rc_pointer_rel, BPF_FUNC_spin_lock, BPF_FUNC_spin_unlock, BPF_FUNC_sk_fullsock, BPF_FUNC_tcp_sock, BPF_FUNC_skb_ecn_set_ce, BPF_FUNC_get_listener_sock, BPF_FUNC_skc_lookup_tcp, BPF_FUNC_tcp_check_syncookie, BPF_FUNC_sysctl_get_name, BPF_FUNC_sysctl_get_current_value, BPF_FUNC_sysctl_get_new_value, BPF_FUNC_sysctl_set_new_value, BPF_FUNC_strtol, BPF_FUNC_strtoul, BPF_FUNC_sk_storage_get, BPF_FUNC_sk_storage_delete, BPF_FUNC_send_signal, BPF_FUNC_tcp_gen_syncookie, BPF_FUNC_skb_output, BPF_FUNC_probe_read_user, BPF_FUNC_probe_read_kernel, BPF_FUNC_probe_read_user_str, BPF_FUNC_probe_read_kernel_str, BPF_FUNC_tcp_send_ack, BPF_FUNC_send_signal_thread, BPF_FUNC_jiffies64, BPF_FUNC_read_branch_records, BPF_FUNC_get_ns_current_pid_tgid, BPF_FUNC_xdp_output, BPF_FUNC_get_netns_cookie, BPF_FUNC_get_current_ancestor_cgroup_id, BPF_FUNC_sk_assign, BPF_FUNC_ktime_get_boot_ns, BPF_FUNC_seq_printf, BPF_FUNC_seq_write, BPF_FUNC_sk_cgroup_id, BPF_FUNC_sk_ancestor_cgroup_id, BPF_FUNC_ringbuf_output, BPF_FUNC_ringbuf_reserve, BPF_FUNC_ringbuf_submit, BPF_FUNC_ringbuf_discard, BPF_FUNC_ringbuf_query, BPF_FUNC_csum_level, BPF_FUNC_skc_to_tcp6_sock, BPF_FUNC_skc_to_tcp_sock, 
BPF_FUNC_skc_to_tcp_timewait_sock, BPF_FUNC_skc_to_tcp_request_sock, BPF_FUNC_skc_to_udp6_sock, BPF_FUNC_get_task_stack, BPF_FUNC_load_hdr_opt, BPF_FUNC_store_hdr_opt, BPF_FUNC_reserve_hdr_opt, BPF_FUNC_inode_storage_get, BPF_FUNC_inode_storage_delete, BPF_FUNC_d_path, BPF_FUNC_copy_from_user, BPF_FUNC_snprintf_btf, BPF_FUNC_seq_printf_btf, BPF_FUNC_skb_cgroup_classid, BPF_FUNC_redirect_neigh, BPF_FUNC_per_cpu_ptr, BPF_FUNC_this_cpu_ptr, BPF_FUNC_redirect_peer, BPF_FUNC_task_storage_get, BPF_FUNC_task_storage_delete, BPF_FUNC_get_current_task_btf, BPF_FUNC_bprm_opts_set, BPF_FUNC_ktime_get_coarse_ns, BPF_FUNC_ima_inode_hash, BPF_FUNC_sock_from_file, BPF_FUNC_check_mtu, BPF_FUNC_for_each_map_elem, BPF_FUNC_snprintf, BPF_FUNC_sys_bpf, BPF_FUNC_btf_find_by_name_kind, BPF_FUNC_sys_close, BPF_FUNC_timer_init, BPF_FUNC_timer_set_callback, BPF_FUNC_timer_start, BPF_FUNC_timer_cancel, BPF_FUNC_get_func_ip, BPF_FUNC_get_attach_cookie, BPF_FUNC_task_pt_regs, BPF_FUNC_get_branch_snapshot, BPF_FUNC_trace_vprintk, BPF_FUNC_skc_to_unix_sock, BPF_FUNC_kallsyms_lookup_name, BPF_FUNC_find_vma, BPF_FUNC_loop, BPF_FUNC_strncmp, BPF_FUNC_get_func_arg, BPF_FUNC_get_func_ret, BPF_FUNC_get_func_arg_cnt, BPF_FUNC_get_retval, BPF_FUNC_set_retval, BPF_FUNC_xdp_get_buff_len, BPF_FUNC_xdp_load_bytes, BPF_FUNC_xdp_store_bytes, BPF_FUNC_copy_from_user_task, BPF_FUNC_skb_set_tstamp, BPF_FUNC_ima_file_hash,
__BPF_FUNC_MAX_ID,
};





enum {
BPF_F_RECOMPUTE_CSUM = (1ULL << 0),
BPF_F_INVALIDATE_HASH = (1ULL << 1),
};




enum {
BPF_F_HDR_FIELD_MASK = 0xfULL,
};


enum {
BPF_F_PSEUDO_HDR = (1ULL << 4),
BPF_F_MARK_MANGLED_0 = (1ULL << 5),
BPF_F_MARK_ENFORCE = (1ULL << 6),
};


enum {
BPF_F_INGRESS = (1ULL << 0),
};


enum {
BPF_F_TUNINFO_IPV6 = (1ULL << 0),
};


enum {
BPF_F_SKIP_FIELD_MASK = 0xffULL,
BPF_F_USER_STACK = (1ULL << 8),

BPF_F_FAST_STACK_CMP = (1ULL << 9),
BPF_F_REUSE_STACKID = (1ULL << 10),

BPF_F_USER_BUILD_ID = (1ULL << 11),
};


enum {
BPF_F_ZERO_CSUM_TX = (1ULL << 1),
BPF_F_DONT_FRAGMENT = (1ULL << 2),
BPF_F_SEQ_NUMBER = (1ULL << 3),
};




enum {
BPF_F_INDEX_MASK = 0xffffffffULL,
BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK,

BPF_F_CTXLEN_MASK = (0xfffffULL << 32),
};


enum {
BPF_F_CURRENT_NETNS = (-1L),
};


enum {
BPF_CSUM_LEVEL_QUERY,
BPF_CSUM_LEVEL_INC,
BPF_CSUM_LEVEL_DEC,
BPF_CSUM_LEVEL_RESET,
};


enum {
BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0),
BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1),
BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2),
BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3),
BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4),
BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5),
BPF_F_ADJ_ROOM_ENCAP_L2_ETH = (1ULL << 6),
};

enum {
BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff,
BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56,
};






enum {
BPF_F_SYSCTL_BASE_NAME = (1ULL << 0),
};


enum {
BPF_LOCAL_STORAGE_GET_F_CREATE = (1ULL << 0),



BPF_SK_STORAGE_GET_F_CREATE = BPF_LOCAL_STORAGE_GET_F_CREATE,
};


enum {
BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0),
};




enum {
BPF_RB_NO_WAKEUP = (1ULL << 0),
BPF_RB_FORCE_WAKEUP = (1ULL << 1),
};


enum {
BPF_RB_AVAIL_DATA = 0,
BPF_RB_RING_SIZE = 1,
BPF_RB_CONS_POS = 2,
BPF_RB_PROD_POS = 3,
};


enum {
BPF_RINGBUF_BUSY_BIT = (1U << 31),
BPF_RINGBUF_DISCARD_BIT = (1U << 30),
BPF_RINGBUF_HDR_SZ = 8,
};


enum {
BPF_SK_LOOKUP_F_REPLACE = (1ULL << 0),
BPF_SK_LOOKUP_F_NO_REUSEPORT = (1ULL << 1),
};


enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
BPF_ADJ_ROOM_MAC,
};


enum bpf_hdr_start_off {
BPF_HDR_START_MAC,
BPF_HDR_START_NET,
};


enum bpf_lwt_encap_mode {
BPF_LWT_ENCAP_SEG6,
BPF_LWT_ENCAP_SEG6_INLINE,
BPF_LWT_ENCAP_IP,
};


enum {
BPF_F_BPRM_SECUREEXEC = (1ULL << 0),
};


enum {
BPF_F_BROADCAST = (1ULL << 3),
BPF_F_EXCLUDE_INGRESS = (1ULL << 4),
};







enum {
BPF_SKB_TSTAMP_UNSPEC,
BPF_SKB_TSTAMP_DELIVERY_MONO,




};




struct __sk_buff {
__u32 len;
__u32 pkt_type;
__u32 mark;
__u32 queue_mapping;
__u32 protocol;
__u32 vlan_present;
__u32 vlan_tci;
__u32 vlan_proto;
__u32 priority;
__u32 ingress_ifindex;
__u32 ifindex;
__u32 tc_index;
__u32 cb[5];
__u32 hash;
__u32 tc_classid;
__u32 data;
__u32 data_end;
__u32 napi_id;


__u32 family;
__u32 remote_ip4;
__u32 local_ip4;
__u32 remote_ip6[4];
__u32 local_ip6[4];
__u32 remote_port;
__u32 local_port;


__u32 data_meta;
union { struct bpf_flow_keys * flow_keys; __u64 :64; } __attribute__((aligned(8)));
__u64 tstamp;
__u32 wire_len;
__u32 gso_segs;
union { struct bpf_sock * sk; __u64 :64; } __attribute__((aligned(8)));
__u32 gso_size;
__u8 tstamp_type;
__u32 :24;
__u64 hwtstamp;
};

struct bpf_tunnel_key {
__u32 tunnel_id;
union {
__u32 remote_ipv4;
__u32 remote_ipv6[4];
};
__u8 tunnel_tos;
__u8 tunnel_ttl;
__u16 tunnel_ext;
__u32 tunnel_label;
};




struct bpf_xfrm_state {
__u32 reqid;
__u32 spi;
__u16 family;
__u16 ext;
union {
__u32 remote_ipv4;
__u32 remote_ipv6[4];
};
};
# 5618 "./include/uapi/linux/bpf.h"
enum bpf_ret_code {
BPF_OK = 0,

BPF_DROP = 2,

BPF_REDIRECT = 7,
# 5632 "./include/uapi/linux/bpf.h"
BPF_LWT_REROUTE = 128,
};

struct bpf_sock {
__u32 bound_dev_if;
__u32 family;
__u32 type;
__u32 protocol;
__u32 mark;
__u32 priority;

__u32 src_ip4;
__u32 src_ip6[4];
__u32 src_port;
__be16 dst_port;
__u16 :16;
__u32 dst_ip4;
__u32 dst_ip6[4];
__u32 state;
__s32 rx_queue_mapping;
};

struct bpf_tcp_sock {
__u32 snd_cwnd;
__u32 srtt_us;
__u32 rtt_min;
__u32 snd_ssthresh;
__u32 rcv_nxt;
__u32 snd_nxt;
__u32 snd_una;
__u32 mss_cache;
__u32 ecn_flags;
__u32 rate_delivered;
__u32 rate_interval_us;
__u32 packets_out;
__u32 retrans_out;
__u32 total_retrans;
__u32 segs_in;


__u32 data_segs_in;


__u32 segs_out;


__u32 data_segs_out;


__u32 lost_out;
__u32 sacked_out;
__u64 bytes_received;



__u64 bytes_acked;



__u32 dsack_dups;


__u32 delivered;
__u32 delivered_ce;
__u32 icsk_retransmits;
};

struct bpf_sock_tuple {
union {
struct {
__be32 saddr;
__be32 daddr;
__be16 sport;
__be16 dport;
} ipv4;
struct {
__be32 saddr[4];
__be32 daddr[4];
__be16 sport;
__be16 dport;
} ipv6;
};
};

struct bpf_xdp_sock {
__u32 queue_id;
};
# 5727 "./include/uapi/linux/bpf.h"
enum xdp_action {
XDP_ABORTED = 0,
XDP_DROP,
XDP_PASS,
XDP_TX,
XDP_REDIRECT,
};




struct xdp_md {
__u32 data;
__u32 data_end;
__u32 data_meta;

__u32 ingress_ifindex;
__u32 rx_queue_index;

__u32 egress_ifindex;
};






struct bpf_devmap_val {
__u32 ifindex;
union {
int fd;
__u32 id;
} bpf_prog;
};






struct bpf_cpumap_val {
__u32 qsize;
union {
int fd;
__u32 id;
} bpf_prog;
};

enum sk_action {
SK_DROP = 0,
SK_PASS,
};




struct sk_msg_md {
union { void * data; __u64 :64; } __attribute__((aligned(8)));
union { void * data_end; __u64 :64; } __attribute__((aligned(8)));

__u32 family;
__u32 remote_ip4;
__u32 local_ip4;
__u32 remote_ip6[4];
__u32 local_ip6[4];
__u32 remote_port;
__u32 local_port;
__u32 size;

union { struct bpf_sock * sk; __u64 :64; } __attribute__((aligned(8)));
};

struct sk_reuseport_md {




union { void * data; __u64 :64; } __attribute__((aligned(8)));

union { void * data_end; __u64 :64; } __attribute__((aligned(8)));






__u32 len;




__u32 eth_protocol;
__u32 ip_protocol;
__u32 bind_inany;
__u32 hash;
# 5834 "./include/uapi/linux/bpf.h"
union { struct bpf_sock * sk; __u64 :64; } __attribute__((aligned(8)));
union { struct bpf_sock * migrating_sk; __u64 :64; } __attribute__((aligned(8)));
};



struct bpf_prog_info {
__u32 type;
__u32 id;
__u8 tag[8];
__u32 jited_prog_len;
__u32 xlated_prog_len;
__u64 __attribute__((aligned(8))) jited_prog_insns;
__u64 __attribute__((aligned(8))) xlated_prog_insns;
__u64 load_time;
__u32 created_by_uid;
__u32 nr_map_ids;
__u64 __attribute__((aligned(8))) map_ids;
char name[16U];
__u32 ifindex;
__u32 gpl_compatible:1;
__u32 :31;
__u64 netns_dev;
__u64 netns_ino;
__u32 nr_jited_ksyms;
__u32 nr_jited_func_lens;
__u64 __attribute__((aligned(8))) jited_ksyms;
__u64 __attribute__((aligned(8))) jited_func_lens;
__u32 btf_id;
__u32 func_info_rec_size;
__u64 __attribute__((aligned(8))) func_info;
__u32 nr_func_info;
__u32 nr_line_info;
__u64 __attribute__((aligned(8))) line_info;
__u64 __attribute__((aligned(8))) jited_line_info;
__u32 nr_jited_line_info;
__u32 line_info_rec_size;
__u32 jited_line_info_rec_size;
__u32 nr_prog_tags;
__u64 __attribute__((aligned(8))) prog_tags;
__u64 run_time_ns;
__u64 run_cnt;
__u64 recursion_misses;
__u32 verified_insns;
} __attribute__((aligned(8)));

struct bpf_map_info {
__u32 type;
__u32 id;
__u32 key_size;
__u32 value_size;
__u32 max_entries;
__u32 map_flags;
char name[16U];
__u32 ifindex;
__u32 btf_vmlinux_value_type_id;
__u64 netns_dev;
__u64 netns_ino;
__u32 btf_id;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
__u32 :32;
__u64 map_extra;
} __attribute__((aligned(8)));

struct bpf_btf_info {
__u64 __attribute__((aligned(8))) btf;
__u32 btf_size;
__u32 id;
__u64 __attribute__((aligned(8))) name;
__u32 name_len;
__u32 kernel_btf;
} __attribute__((aligned(8)));

struct bpf_link_info {
__u32 type;
__u32 id;
__u32 prog_id;
union {
struct {
__u64 __attribute__((aligned(8))) tp_name;
__u32 tp_name_len;
} raw_tracepoint;
struct {
__u32 attach_type;
__u32 target_obj_id;
__u32 target_btf_id;
} tracing;
struct {
__u64 cgroup_id;
__u32 attach_type;
} cgroup;
struct {
__u64 __attribute__((aligned(8))) target_name;
__u32 target_name_len;
union {
struct {
__u32 map_id;
} map;
};
} iter;
struct {
__u32 netns_ino;
__u32 attach_type;
} netns;
struct {
__u32 ifindex;
} xdp;
};
} __attribute__((aligned(8)));





struct bpf_sock_addr {
__u32 user_family;
__u32 user_ip4;


__u32 user_ip6[4];


__u32 user_port;


__u32 family;
__u32 type;
__u32 protocol;
__u32 msg_src_ip4;


__u32 msg_src_ip6[4];


union { struct bpf_sock * sk; __u64 :64; } __attribute__((aligned(8)));
};







struct bpf_sock_ops {
__u32 op;
union {
__u32 args[4];
__u32 reply;
__u32 replylong[4];
};
__u32 family;
__u32 remote_ip4;
__u32 local_ip4;
__u32 remote_ip6[4];
__u32 local_ip6[4];
__u32 remote_port;
__u32 local_port;
__u32 is_fullsock;



__u32 snd_cwnd;
__u32 srtt_us;
__u32 bpf_sock_ops_cb_flags;
__u32 state;
__u32 rtt_min;
__u32 snd_ssthresh;
__u32 rcv_nxt;
__u32 snd_nxt;
__u32 snd_una;
__u32 mss_cache;
__u32 ecn_flags;
__u32 rate_delivered;
__u32 rate_interval_us;
__u32 packets_out;
__u32 retrans_out;
__u32 total_retrans;
__u32 segs_in;
__u32 data_segs_in;
__u32 segs_out;
__u32 data_segs_out;
__u32 lost_out;
__u32 sacked_out;
__u32 sk_txhash;
__u64 bytes_received;
__u64 bytes_acked;
union { struct bpf_sock * sk; __u64 :64; } __attribute__((aligned(8)));
# 6036 "./include/uapi/linux/bpf.h"
union { void * skb_data; __u64 :64; } __attribute__((aligned(8)));
union { void * skb_data_end; __u64 :64; } __attribute__((aligned(8)));
__u32 skb_len;



__u32 skb_tcp_flags;
# 6052 "./include/uapi/linux/bpf.h"
};


enum {
BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0),
BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1),
BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2),
BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3),
# 6078 "./include/uapi/linux/bpf.h"
BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = (1<<4),
# 6087 "./include/uapi/linux/bpf.h"
BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = (1<<5),
# 6102 "./include/uapi/linux/bpf.h"
BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = (1<<6),

BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F,
};




enum {
BPF_SOCK_OPS_VOID,
BPF_SOCK_OPS_TIMEOUT_INIT,


BPF_SOCK_OPS_RWND_INIT,



BPF_SOCK_OPS_TCP_CONNECT_CB,


BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB,



BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB,



BPF_SOCK_OPS_NEEDS_ECN,


BPF_SOCK_OPS_BASE_RTT,






BPF_SOCK_OPS_RTO_CB,




BPF_SOCK_OPS_RETRANS_CB,





BPF_SOCK_OPS_STATE_CB,



BPF_SOCK_OPS_TCP_LISTEN_CB,


BPF_SOCK_OPS_RTT_CB,

BPF_SOCK_OPS_PARSE_HDR_OPT_CB,
# 6174 "./include/uapi/linux/bpf.h"
BPF_SOCK_OPS_HDR_OPT_LEN_CB,
# 6191 "./include/uapi/linux/bpf.h"
BPF_SOCK_OPS_WRITE_HDR_OPT_CB,
# 6217 "./include/uapi/linux/bpf.h"
};






enum {
BPF_TCP_ESTABLISHED = 1,
BPF_TCP_SYN_SENT,
BPF_TCP_SYN_RECV,
BPF_TCP_FIN_WAIT1,
BPF_TCP_FIN_WAIT2,
BPF_TCP_TIME_WAIT,
BPF_TCP_CLOSE,
BPF_TCP_CLOSE_WAIT,
BPF_TCP_LAST_ACK,
BPF_TCP_LISTEN,
BPF_TCP_CLOSING,
BPF_TCP_NEW_SYN_RECV,

BPF_TCP_MAX_STATES
};

enum {
TCP_BPF_IW = 1001,
TCP_BPF_SNDCWND_CLAMP = 1002,
TCP_BPF_DELACK_MAX = 1003,
TCP_BPF_RTO_MIN = 1004,
# 6278 "./include/uapi/linux/bpf.h"
TCP_BPF_SYN = 1005,
TCP_BPF_SYN_IP = 1006,
TCP_BPF_SYN_MAC = 1007,
};

enum {
BPF_LOAD_HDR_OPT_TCP_SYN = (1ULL << 0),
};




enum {
BPF_WRITE_HDR_TCP_CURRENT_MSS = 1,






BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2,


};

struct bpf_perf_event_value {
__u64 counter;
__u64 enabled;
__u64 running;
};

enum {
BPF_DEVCG_ACC_MKNOD = (1ULL << 0),
BPF_DEVCG_ACC_READ = (1ULL << 1),
BPF_DEVCG_ACC_WRITE = (1ULL << 2),
};

enum {
BPF_DEVCG_DEV_BLOCK = (1ULL << 0),
BPF_DEVCG_DEV_CHAR = (1ULL << 1),
};

struct bpf_cgroup_dev_ctx {

__u32 access_type;
__u32 major;
__u32 minor;
};

struct bpf_raw_tracepoint_args {
__u64 args[0];
};
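
/* args[0] is a zero-length array, the pre-C99 idiom for a flexible array
 * member: the raw tracepoint arguments are laid out directly after the
 * struct.
 */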




enum {
BPF_FIB_LOOKUP_DIRECT = (1U << 0),
BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
};

enum {
BPF_FIB_LKUP_RET_SUCCESS,
BPF_FIB_LKUP_RET_BLACKHOLE,
BPF_FIB_LKUP_RET_UNREACHABLE,
BPF_FIB_LKUP_RET_PROHIBIT,
BPF_FIB_LKUP_RET_NOT_FWDED,
BPF_FIB_LKUP_RET_FWD_DISABLED,
BPF_FIB_LKUP_RET_UNSUPP_LWT,
BPF_FIB_LKUP_RET_NO_NEIGH,
BPF_FIB_LKUP_RET_FRAG_NEEDED,
};

struct bpf_fib_lookup {



__u8 family;


__u8 l4_protocol;
__be16 sport;
__be16 dport;

union {

__u16 tot_len;


__u16 mtu_result;
};



__u32 ifindex;

union {

__u8 tos;
__be32 flowinfo;


__u32 rt_metric;
};

union {
__be32 ipv4_src;
__u32 ipv6_src[4];
};





union {
__be32 ipv4_dst;
__u32 ipv6_dst[4];
};


__be16 h_vlan_proto;
__be16 h_vlan_TCI;
__u8 smac[6];
__u8 dmac[6];
};

struct bpf_redir_neigh {

__u32 nh_family;

union {
__be32 ipv4_nh;
__u32 ipv6_nh[4];
};
};


enum bpf_check_mtu_flags {
BPF_MTU_CHK_SEGS = (1U << 0),
};

enum bpf_check_mtu_ret {
BPF_MTU_CHK_RET_SUCCESS,
BPF_MTU_CHK_RET_FRAG_NEEDED,
BPF_MTU_CHK_RET_SEGS_TOOBIG,
};

enum bpf_task_fd_type {
BPF_FD_TYPE_RAW_TRACEPOINT,
BPF_FD_TYPE_TRACEPOINT,
BPF_FD_TYPE_KPROBE,
BPF_FD_TYPE_KRETPROBE,
BPF_FD_TYPE_UPROBE,
BPF_FD_TYPE_URETPROBE,
};

enum {
BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0),
BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1),
BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2),
};

struct bpf_flow_keys {
__u16 nhoff;
__u16 thoff;
__u16 addr_proto;
__u8 is_frag;
__u8 is_first_frag;
__u8 is_encap;
__u8 ip_proto;
__be16 n_proto;
__be16 sport;
__be16 dport;
union {
struct {
__be32 ipv4_src;
__be32 ipv4_dst;
};
struct {
__u32 ipv6_src[4];
__u32 ipv6_dst[4];
};
};
__u32 flags;
__be32 flow_label;
};

struct bpf_func_info {
__u32 insn_off;
__u32 type_id;
};




struct bpf_line_info {
__u32 insn_off;
__u32 file_name_off;
__u32 line_off;
__u32 line_col;
};

struct bpf_spin_lock {
__u32 val;
};

struct bpf_timer {
__u64 :64;
__u64 :64;
} __attribute__((aligned(8)));
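
/* struct bpf_timer is deliberately opaque: two unnamed 64-bit bitfields
 * reserve 16 aligned bytes that BPF programs must manipulate only through
 * the bpf_timer_*() helpers.
 */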

struct bpf_sysctl {
__u32 write;


__u32 file_pos;


};

struct bpf_sockopt {
union { struct bpf_sock * sk; __u64 :64; } __attribute__((aligned(8)));
union { void * optval; __u64 :64; } __attribute__((aligned(8)));
union { void * optval_end; __u64 :64; } __attribute__((aligned(8)));

__s32 level;
__s32 optname;
__s32 optlen;
__s32 retval;
};

struct bpf_pidns_info {
__u32 pid;
__u32 tgid;
};


struct bpf_sk_lookup {
union {
union { struct bpf_sock * sk; __u64 :64; } __attribute__((aligned(8)));
__u64 cookie;
};

__u32 family;
__u32 protocol;
__u32 remote_ip4;
__u32 remote_ip6[4];
__be16 remote_port;
__u16 :16;
__u32 local_ip4;
__u32 local_ip6[4];
__u32 local_port;
__u32 ingress_ifindex;
};
# 6541 "./include/uapi/linux/bpf.h"
struct btf_ptr {
void *ptr;
__u32 type_id;
__u32 flags;
};
# 6556 "./include/uapi/linux/bpf.h"
enum {
BTF_F_COMPACT = (1ULL << 0),
BTF_F_NONAME = (1ULL << 1),
BTF_F_PTR_RAW = (1ULL << 2),
BTF_F_ZERO = (1ULL << 3),
};





enum bpf_core_relo_kind {
BPF_CORE_FIELD_BYTE_OFFSET = 0,
BPF_CORE_FIELD_BYTE_SIZE = 1,
BPF_CORE_FIELD_EXISTS = 2,
BPF_CORE_FIELD_SIGNED = 3,
BPF_CORE_FIELD_LSHIFT_U64 = 4,
BPF_CORE_FIELD_RSHIFT_U64 = 5,
BPF_CORE_TYPE_ID_LOCAL = 6,
BPF_CORE_TYPE_ID_TARGET = 7,
BPF_CORE_TYPE_EXISTS = 8,
BPF_CORE_TYPE_SIZE = 9,
BPF_CORE_ENUMVAL_EXISTS = 10,
BPF_CORE_ENUMVAL_VALUE = 11,
};
# 6630 "./include/uapi/linux/bpf.h"
struct bpf_core_relo {
__u32 insn_off;
__u32 type_id;
__u32 access_str_off;
enum bpf_core_relo_kind kind;
};
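
/* Lines of the form '# <line> "<file>" <flag>' are preprocessor
 * linemarkers: flag 1 marks entry into an included file and flag 2 the
 * return to the includer. The marker below resumes
 * ./include/net/ip6_fib.h now that the uapi bpf.h material is done.
 */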
# 24 "./include/net/ip6_fib.h" 2
# 39 "./include/net/ip6_fib.h"
struct rt6_info;
struct fib6_info;

struct fib6_config {
u32 fc_table;
u32 fc_metric;
int fc_dst_len;
int fc_src_len;
int fc_ifindex;
u32 fc_flags;
u32 fc_protocol;
u16 fc_type;
u16 fc_delete_all_nh : 1,
fc_ignore_dev_down:1,
__unused : 14;
u32 fc_nh_id;

struct in6_addr fc_dst;
struct in6_addr fc_src;
struct in6_addr fc_prefsrc;
struct in6_addr fc_gateway;

unsigned long fc_expires;
struct nlattr *fc_mx;
int fc_mx_len;
int fc_mp_len;
struct nlattr *fc_mp;

struct nl_info fc_nlinfo;
struct nlattr *fc_encap;
u16 fc_encap_type;
bool fc_is_fdb;
};

struct fib6_node {
struct fib6_node *parent;
struct fib6_node *left;
struct fib6_node *right;



struct fib6_info *leaf;

__u16 fn_bit;
__u16 fn_flags;
int fn_sernum;
struct fib6_info *rr_ptr;
struct callback_head rcu;
};

struct fib6_gc_args {
int timeout;
int more;
};




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fib6_routes_require_src(const struct net *net)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fib6_routes_require_src_inc(struct net *net) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fib6_routes_require_src_dec(struct net *net) {}
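
/* Every 'static inline' in kernel headers expands to this attribute set:
 * __gnu_inline__ selects GNU inline semantics under -std=gnu11,
 * __unused__ silences warnings for helpers a given .c file never calls,
 * and patchable_function_entry(0, 0) is the expansion of notrace in this
 * configuration, opting inline helpers out of the 8-NOP ftrace padding
 * that -fpatchable-function-entry=8 adds to ordinary functions.
 */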
# 130 "./include/net/ip6_fib.h"
struct rt6key {
struct in6_addr addr;
int plen;
};

struct fib6_table;

struct rt6_exception_bucket {
struct hlist_head chain;
int depth;
};

struct rt6_exception {
struct hlist_node hlist;
struct rt6_info *rt6i;
unsigned long stamp;
struct callback_head rcu;
};





struct fib6_nh {
struct fib_nh_common nh_common;





struct rt6_info * *rt6i_pcpu;
struct rt6_exception_bucket *rt6i_exception_bucket;
};

struct fib6_info {
struct fib6_table *fib6_table;
struct fib6_info *fib6_next;
struct fib6_node *fib6_node;






union {
struct list_head fib6_siblings;
struct list_head nh_list;
};
unsigned int fib6_nsiblings;

refcount_t fib6_ref;
unsigned long expires;
struct dst_metrics *fib6_metrics;


struct rt6key fib6_dst;
u32 fib6_flags;
struct rt6key fib6_src;
struct rt6key fib6_prefsrc;

u32 fib6_metric;
u8 fib6_protocol;
u8 fib6_type;

u8 offload;
u8 trap;
u8 offload_failed;

u8 should_flush:1,
dst_nocount:1,
dst_nopolicy:1,
fib6_destroying:1,
unused:4;

struct callback_head rcu;
struct nexthop *nh;
struct fib6_nh fib6_nh[];
};

struct rt6_info {
struct dst_entry dst;
struct fib6_info *from;
int sernum;

struct rt6key rt6i_dst;
struct rt6key rt6i_src;
struct in6_addr rt6i_gateway;
struct inet6_dev *rt6i_idev;
u32 rt6i_flags;

struct list_head rt6i_uncached;
struct uncached_list *rt6i_uncached_list;


unsigned short rt6i_nfheader_len;
};

struct fib6_result {
struct fib6_nh *nh;
struct fib6_info *f6i;
u32 fib6_flags;
u8 fib6_type;
struct rt6_info *rt6;
};
# 243 "./include/net/ip6_fib.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
{
return ((struct rt6_info *)dst)->rt6i_idev;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fib6_requires_src(const struct fib6_info *rt)
{
return rt->fib6_src.plen > 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fib6_clean_expires(struct fib6_info *f6i)
{
f6i->fib6_flags &= ~0x00400000;
f6i->expires = 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fib6_set_expires(struct fib6_info *f6i,
unsigned long expires)
{
f6i->expires = expires;
f6i->fib6_flags |= 0x00400000;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fib6_check_expired(const struct fib6_info *f6i)
{
if (f6i->fib6_flags & 0x00400000)
return (({ unsigned long __dummy; typeof(jiffies) __dummy2; (void)(&__dummy == &__dummy2); 1; }) && ({ unsigned long __dummy; typeof(f6i->expires) __dummy2; (void)(&__dummy == &__dummy2); 1; }) && ((long)((f6i->expires) - (jiffies)) < 0));
return false;
}
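
/* The return expression above is the expansion of time_after(jiffies,
 * f6i->expires): the two dummy comparisons are typecheck() no-ops and
 * the real test is the signed subtraction at the end.
 */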






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fib6_get_cookie_safe(const struct fib6_info *f6i,
u32 *cookie)
{
struct fib6_node *fn;
bool status = false;

fn = ({ typeof(*(f6i->fib6_node)) *__UNIQUE_ID_rcu457 = (typeof(*(f6i->fib6_node)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_458(void) ; if (!((sizeof((f6i->fib6_node)) == sizeof(char) || sizeof((f6i->fib6_node)) == sizeof(short) || sizeof((f6i->fib6_node)) == sizeof(int) || sizeof((f6i->fib6_node)) == sizeof(long)) || sizeof((f6i->fib6_node)) == sizeof(long long))) __compiletime_assert_458(); } while (0); (*(const volatile typeof( _Generic(((f6i->fib6_node)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((f6i->fib6_node)))) *)&((f6i->fib6_node))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(f6i->fib6_node)) *)(__UNIQUE_ID_rcu457)); });

if (fn) {
*cookie = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_459(void) ; if (!((sizeof(fn->fn_sernum) == sizeof(char) || sizeof(fn->fn_sernum) == sizeof(short) || sizeof(fn->fn_sernum) == sizeof(int) || sizeof(fn->fn_sernum) == sizeof(long)) || sizeof(fn->fn_sernum) == sizeof(long long))) __compiletime_assert_459(); } while (0); (*(const volatile typeof( _Generic((fn->fn_sernum), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (fn->fn_sernum))) *)&(fn->fn_sernum)); });

do { do { } while (0); __asm__ __volatile__ ("fence " "r" "," "r" : : : "memory"); } while (0);
status = true;
}

return status;
}
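
/* Stripped of macro expansions, the body above is roughly:
 *
 *	fn = rcu_dereference(f6i->fib6_node);
 *	if (fn) {
 *		*cookie = READ_ONCE(fn->fn_sernum);
 *		smp_rmb();
 *		status = true;
 *	}
 *
 * rcu_dereference() and READ_ONCE() contribute the compiletime_assert
 * size checks and the volatile loads; smp_rmb() becomes the RISC-V
 * "fence r,r" instruction.
 */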

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 rt6_get_cookie(const struct rt6_info *rt)
{
struct fib6_info *from;
u32 cookie = 0;

if (rt->sernum)
return rt->sernum;

rcu_read_lock();

from = ({ typeof(*(rt->from)) *__UNIQUE_ID_rcu460 = (typeof(*(rt->from)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_461(void) ; if (!((sizeof((rt->from)) == sizeof(char) || sizeof((rt->from)) == sizeof(short) || sizeof((rt->from)) == sizeof(int) || sizeof((rt->from)) == sizeof(long)) || sizeof((rt->from)) == sizeof(long long))) __compiletime_assert_461(); } while (0); (*(const volatile typeof( _Generic(((rt->from)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((rt->from)))) *)&((rt->from))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(rt->from)) *)(__UNIQUE_ID_rcu460)); });
if (from)
fib6_get_cookie_safe(from, &cookie);

rcu_read_unlock();

return cookie;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip6_rt_put(struct rt6_info *rt)
{



do { __attribute__((__noreturn__)) extern void __compiletime_assert_462(void) ; if (!(!(__builtin_offsetof(struct rt6_info, dst) != 0))) __compiletime_assert_462(); } while (0);
dst_release(&rt->dst);
}
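
/* The compiletime_assert above is BUILD_BUG_ON(offsetof(struct rt6_info,
 * dst) != 0): the IPv6 code relies on dst being the first member so a
 * dst_entry pointer can be cast straight to a rt6_info (see
 * ip6_dst_idev() earlier in this header).
 */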

struct fib6_info *fib6_info_alloc(gfp_t gfp_flags, bool with_fib6_nh);
void fib6_info_destroy_rcu(struct callback_head *head);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fib6_info_hold(struct fib6_info *f6i)
{
refcount_inc(&f6i->fib6_ref);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fib6_info_hold_safe(struct fib6_info *f6i)
{
return refcount_inc_not_zero(&f6i->fib6_ref);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fib6_info_release(struct fib6_info *f6i)
{
if (f6i && refcount_dec_and_test(&f6i->fib6_ref))
call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
}

enum fib6_walk_state {



FWS_L,
FWS_R,
FWS_C,
FWS_U
};

struct fib6_walker {
struct list_head lh;
struct fib6_node *root, *node;
struct fib6_info *leaf;
enum fib6_walk_state state;
unsigned int skip;
unsigned int count;
unsigned int skip_in_node;
int (*func)(struct fib6_walker *);
void *args;
};

struct rt6_statistics {
__u32 fib_nodes;
__u32 fib_route_nodes;
__u32 fib_rt_entries;
__u32 fib_rt_cache;
__u32 fib_discarded_routes;


atomic_t fib_rt_alloc;
};
# 386 "./include/net/ip6_fib.h"
struct fib6_table {
struct hlist_node tb6_hlist;
u32 tb6_id;
spinlock_t tb6_lock;
struct fib6_node tb6_root;
struct inet_peer_base tb6_peers;
unsigned int flags;
unsigned int fib_seq;

};
# 413 "./include/net/ip6_fib.h"
typedef struct rt6_info *(*pol_lookup_t)(struct net *,
struct fib6_table *,
struct flowi6 *,
const struct sk_buff *, int);

struct fib6_entry_notifier_info {
struct fib_notifier_info info;
struct fib6_info *rt;
unsigned int nsiblings;
};





struct fib6_table *fib6_get_table(struct net *net, u32 id);
struct fib6_table *fib6_new_table(struct net *net, u32 id);
struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
const struct sk_buff *skb,
int flags, pol_lookup_t lookup);




int fib6_lookup(struct net *net, int oif, struct flowi6 *fl6,
struct fib6_result *res, int flags);


int fib6_table_lookup(struct net *net, struct fib6_table *table,
int oif, struct flowi6 *fl6, struct fib6_result *res,
int strict);

void fib6_select_path(const struct net *net, struct fib6_result *res,
struct flowi6 *fl6, int oif, bool have_oif_match,
const struct sk_buff *skb, int strict);
struct fib6_node *fib6_node_lookup(struct fib6_node *root,
const struct in6_addr *daddr,
const struct in6_addr *saddr);

struct fib6_node *fib6_locate(struct fib6_node *root,
const struct in6_addr *daddr, int dst_len,
const struct in6_addr *saddr, int src_len,
bool exact_match);

void fib6_clean_all(struct net *net, int (*func)(struct fib6_info *, void *arg),
void *arg);
void fib6_clean_all_skip_notify(struct net *net,
int (*func)(struct fib6_info *, void *arg),
void *arg);

int fib6_add(struct fib6_node *root, struct fib6_info *rt,
struct nl_info *info, struct netlink_ext_ack *extack);
int fib6_del(struct fib6_info *rt, struct nl_info *info);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
void rt6_get_prefsrc(const struct rt6_info *rt, struct in6_addr *addr)
{
const struct fib6_info *from;

rcu_read_lock();

from = ({ typeof(*(rt->from)) *__UNIQUE_ID_rcu463 = (typeof(*(rt->from)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_464(void) ; if (!((sizeof((rt->from)) == sizeof(char) || sizeof((rt->from)) == sizeof(short) || sizeof((rt->from)) == sizeof(int) || sizeof((rt->from)) == sizeof(long)) || sizeof((rt->from)) == sizeof(long long))) __compiletime_assert_464(); } while (0); (*(const volatile typeof( _Generic(((rt->from)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((rt->from)))) *)&((rt->from))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(rt->from)) *)(__UNIQUE_ID_rcu463)); });
if (from) {
*addr = from->fib6_prefsrc.addr;
} else {
struct in6_addr in6_zero = {};

*addr = in6_zero;
}

rcu_read_unlock();
}

int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
void fib6_nh_release(struct fib6_nh *fib6_nh);
void fib6_nh_release_dsts(struct fib6_nh *fib6_nh);

int call_fib6_entry_notifiers(struct net *net,
enum fib_event_type event_type,
struct fib6_info *rt,
struct netlink_ext_ack *extack);
int call_fib6_multipath_entry_notifiers(struct net *net,
enum fib_event_type event_type,
struct fib6_info *rt,
unsigned int nsiblings,
struct netlink_ext_ack *extack);
int call_fib6_entry_notifiers_replace(struct net *net, struct fib6_info *rt);
void fib6_rt_update(struct net *net, struct fib6_info *rt,
struct nl_info *info);
void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
unsigned int flags);

void fib6_run_gc(unsigned long expires, struct net *net, bool force);

void fib6_gc_cleanup(void);

int fib6_init(void);

struct ipv6_route_iter {
struct seq_net_private p;
struct fib6_walker w;
loff_t skip;
struct fib6_table *tbl;
int sernum;
};

extern const struct seq_operations ipv6_route_seq_ops;

int call_fib6_notifier(struct notifier_block *nb,
enum fib_event_type event_type,
struct fib_notifier_info *info);
int call_fib6_notifiers(struct net *net, enum fib_event_type event_type,
struct fib_notifier_info *info);

int fib6_notifier_init(struct net *net);
void fib6_notifier_exit(struct net *net);

unsigned int fib6_tables_seq_read(struct net *net);
int fib6_tables_dump(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);

void fib6_update_sernum(struct net *net, struct fib6_info *rt);
void fib6_update_sernum_upto_root(struct net *net, struct fib6_info *rt);
void fib6_update_sernum_stub(struct net *net, struct fib6_info *f6i);

void fib6_metric_set(struct fib6_info *f6i, int metric, u32 val);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fib6_metric_locked(struct fib6_info *f6i, int metric)
{
return !!(f6i->fib6_metrics->metrics[RTAX_LOCK - 1] & (1 << metric));
}
void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
bool offload, bool trap, bool offload_failed);


struct bpf_iter__ipv6_route {
union { struct bpf_iter_meta * meta; __u64 :64; } __attribute__((aligned(8)));
union { struct fib6_info * rt; __u64 :64; } __attribute__((aligned(8)));
};






;




;




;




;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct rt6_info *pol_lookup_func(pol_lookup_t lookup,
struct net *net,
struct fib6_table *table,
struct flowi6 *fl6,
const struct sk_buff *skb,
int flags)
{
return lookup(net, table, fl6, skb, flags);





}
# 621 "./include/net/ip6_fib.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fib6_has_custom_rules(const struct net *net)
{
return false;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int fib6_rules_init(void)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void fib6_rules_cleanup(void)
{
return ;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fib6_rule_default(const struct fib_rule *rule)
{
return true;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int fib6_rules_dump(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int fib6_rules_seq_read(struct net *net)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool fib6_rules_early_flow_dissect(struct net *net,
struct sk_buff *skb,
struct flowi6 *fl6,
struct flow_keys *flkeys)
{
return false;
}
# 49 "net/ipv6/route.c" 2
# 1 "./include/net/ip6_route.h" 1




struct route_info {
__u8 type;
__u8 length;
__u8 prefix_len;





__u8 reserved_l:3,
route_pref:2,
reserved_h:3;

__be32 lifetime;
__u8 prefix[];
};


# 1 "./include/net/addrconf.h" 1
# 29 "./include/net/addrconf.h"
struct prefix_info {
__u8 type;
__u8 length;
__u8 prefix_len;






__u8 reserved : 6,
autoconf : 1,
onlink : 1;



__be32 valid;
__be32 prefered;
__be32 reserved2;

struct in6_addr prefix;
};






struct in6_validator_info {
struct in6_addr i6vi_addr;
struct inet6_dev *i6vi_dev;
struct netlink_ext_ack *extack;
};

struct ifa6_config {
const struct in6_addr *pfx;
unsigned int plen;

u8 ifa_proto;

const struct in6_addr *peer_pfx;

u32 rt_priority;
u32 ifa_flags;
u32 preferred_lft;
u32 valid_lft;
u16 scope;
};

int addrconf_init(void);
void addrconf_cleanup(void);

int addrconf_add_ifaddr(struct net *net, void *arg);
int addrconf_del_ifaddr(struct net *net, void *arg);
int addrconf_set_dstaddr(struct net *net, void *arg);

int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
const struct net_device *dev, int strict);
int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
const struct net_device *dev, bool skip_dev_check,
int strict, u32 banned_flags);





int ipv6_chk_rpl_srh_loop(struct net *net, const struct in6_addr *segs,
unsigned char nsegs);

bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
const unsigned int prefix_len,
struct net_device *dev);

int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev);

struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr,
struct net_device *dev);

struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
const struct in6_addr *addr,
struct net_device *dev, int strict);

int ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
const struct in6_addr *daddr, unsigned int srcprefs,
struct in6_addr *saddr);
int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
u32 banned_flags);
bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
bool match_wildcard);
bool inet_rcv_saddr_any(const struct sock *sk);
void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr);

void addrconf_add_linklocal(struct inet6_dev *idev,
const struct in6_addr *addr, u32 flags);

int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
const struct prefix_info *pinfo,
struct inet6_dev *in6_dev,
const struct in6_addr *addr, int addr_type,
u32 addr_flags, bool sllao, bool tokenized,
__u32 valid_lft, u32 prefered_lft);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void addrconf_addr_eui48_base(u8 *eui, const char *const addr)
{
memcpy(eui, addr, 3);
eui[3] = 0xFF;
eui[4] = 0xFE;
memcpy(eui + 5, addr + 3, 3);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void addrconf_addr_eui48(u8 *eui, const char *const addr)
{
addrconf_addr_eui48_base(eui, addr);
eui[0] ^= 2;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
{
if (dev->addr_len != 6)
return -1;
# 165 "./include/net/addrconf.h"
addrconf_addr_eui48_base(eui, dev->dev_addr);

if (dev->dev_id) {
eui[3] = (dev->dev_id >> 8) & 0xFF;
eui[4] = dev->dev_id & 0xFF;
} else {
eui[0] ^= 2;
}

return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long addrconf_timeout_fixup(u32 timeout,
unsigned int unit)
{
if (timeout == 0xffffffff)
return ~0UL;






if (0xfffffffe > ((long)(~0UL >> 1)) / unit && timeout > ((long)(~0UL >> 1)) / unit)
return ((long)(~0UL >> 1)) / unit;

return timeout;
}
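
/* ((long)(~0UL >> 1)) is the expansion of LONG_MAX: a 32-bit lifetime of
 * 0xffffffff means "infinite", and finite values are clamped so that
 * timeout * unit cannot overflow a long.
 */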

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int addrconf_finite_timeout(unsigned long timeout)
{
return ~timeout;
}




int ipv6_addr_label_init(void);
void ipv6_addr_label_cleanup(void);
int ipv6_addr_label_rtnl_register(void);
u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr,
int type, int ifindex);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_mc_may_pull(struct sk_buff *skb,
unsigned int len)
{
if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len)
return false;

return pskb_may_pull(skb, len);
}

int ipv6_sock_mc_join(struct sock *sk, int ifindex,
const struct in6_addr *addr);
int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);
void __ipv6_sock_mc_close(struct sock *sk);
void ipv6_sock_mc_close(struct sock *sk);
bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
const struct in6_addr *src_addr);

int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr);
int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr);
int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr);
void ipv6_mc_up(struct inet6_dev *idev);
void ipv6_mc_down(struct inet6_dev *idev);
void ipv6_mc_unmap(struct inet6_dev *idev);
void ipv6_mc_remap(struct inet6_dev *idev);
void ipv6_mc_init_dev(struct inet6_dev *idev);
void ipv6_mc_destroy_dev(struct inet6_dev *idev);
int ipv6_mc_check_mld(struct sk_buff *skb);
void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp);

bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
const struct in6_addr *src_addr);

void ipv6_mc_dad_complete(struct inet6_dev *idev);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_is_mld(struct sk_buff *skb, int nexthdr, int offset)
{
struct icmp6hdr *hdr;

if (nexthdr != 58 ||
!pskb_network_may_pull(skb, offset + sizeof(struct icmp6hdr)))
return false;

hdr = (struct icmp6hdr *)(skb_network_header(skb) + offset);

switch (hdr->icmp6_type) {
case 130:
case 131:
case 132:
case 143:
return true;
default:
break;
}
return false;
}

void addrconf_prefix_rcv(struct net_device *dev,
u8 *opt, int len, bool sllao);




int ipv6_sock_ac_join(struct sock *sk, int ifindex,
const struct in6_addr *addr);
int ipv6_sock_ac_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);
void __ipv6_sock_ac_close(struct sock *sk);
void ipv6_sock_ac_close(struct sock *sk);

int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr);
int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
void ipv6_ac_destroy_dev(struct inet6_dev *idev);
bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
const struct in6_addr *addr);
bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
const struct in6_addr *addr);
int ipv6_anycast_init(void);
void ipv6_anycast_cleanup(void);


int register_inet6addr_notifier(struct notifier_block *nb);
int unregister_inet6addr_notifier(struct notifier_block *nb);
int inet6addr_notifier_call_chain(unsigned long val, void *v);

int register_inet6addr_validator_notifier(struct notifier_block *nb);
int unregister_inet6addr_validator_notifier(struct notifier_block *nb);
int inet6addr_validator_notifier_call_chain(unsigned long val, void *v);

void inet6_netconf_notify_devconf(struct net *net, int event, int type,
int ifindex, struct ipv6_devconf *devconf);
# 313 "./include/net/addrconf.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct inet6_dev *__in6_dev_get(const struct net_device *dev)
{
return ({ typeof(*(dev->ip6_ptr)) *__UNIQUE_ID_rcu465 = (typeof(*(dev->ip6_ptr)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_466(void) ; if (!((sizeof((dev->ip6_ptr)) == sizeof(char) || sizeof((dev->ip6_ptr)) == sizeof(short) || sizeof((dev->ip6_ptr)) == sizeof(int) || sizeof((dev->ip6_ptr)) == sizeof(long)) || sizeof((dev->ip6_ptr)) == sizeof(long long))) __compiletime_assert_466(); } while (0); (*(const volatile typeof( _Generic(((dev->ip6_ptr)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((dev->ip6_ptr)))) *)&((dev->ip6_ptr))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))); ; ((typeof(*(dev->ip6_ptr)) *)(__UNIQUE_ID_rcu465)); });
}
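
/* __in6_dev_get() is rcu_dereference_rtnl(dev->ip6_ptr): the
 * lockdep_rtnl_is_held() term in the expansion lets callers hold either
 * the RCU read lock or the RTNL mutex.
 */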
# 326 "./include/net/addrconf.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct inet6_dev *__in6_dev_stats_get(const struct net_device *dev,
const struct sk_buff *skb)
{
if (netif_is_l3_master(dev))
dev = dev_get_by_index_rcu(dev_net(dev), inet6_iif(skb));
return __in6_dev_get(dev);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct inet6_dev *__in6_dev_get_safely(const struct net_device *dev)
{
if (__builtin_expect(!!(dev), 1))
return ({ typeof(*(dev->ip6_ptr)) *__UNIQUE_ID_rcu467 = (typeof(*(dev->ip6_ptr)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_468(void) ; if (!((sizeof((dev->ip6_ptr)) == sizeof(char) || sizeof((dev->ip6_ptr)) == sizeof(short) || sizeof((dev->ip6_ptr)) == sizeof(int) || sizeof((dev->ip6_ptr)) == sizeof(long)) || sizeof((dev->ip6_ptr)) == sizeof(long long))) __compiletime_assert_468(); } while (0); (*(const volatile typeof( _Generic(((dev->ip6_ptr)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((dev->ip6_ptr)))) *)&((dev->ip6_ptr))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))); ; ((typeof(*(dev->ip6_ptr)) *)(__UNIQUE_ID_rcu467)); });
else
return ((void *)0);
}
# 356 "./include/net/addrconf.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct inet6_dev *in6_dev_get(const struct net_device *dev)
{
struct inet6_dev *idev;

rcu_read_lock();
idev = ({ typeof(*(dev->ip6_ptr)) *__UNIQUE_ID_rcu469 = (typeof(*(dev->ip6_ptr)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_470(void) ; if (!((sizeof((dev->ip6_ptr)) == sizeof(char) || sizeof((dev->ip6_ptr)) == sizeof(short) || sizeof((dev->ip6_ptr)) == sizeof(int) || sizeof((dev->ip6_ptr)) == sizeof(long)) || sizeof((dev->ip6_ptr)) == sizeof(long long))) __compiletime_assert_470(); } while (0); (*(const volatile typeof( _Generic(((dev->ip6_ptr)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((dev->ip6_ptr)))) *)&((dev->ip6_ptr))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(dev->ip6_ptr)) *)(__UNIQUE_ID_rcu469)); });
if (idev)
refcount_inc(&idev->refcnt);
rcu_read_unlock();
return idev;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct neigh_parms *__in6_dev_nd_parms_get_rcu(const struct net_device *dev)
{
struct inet6_dev *idev = __in6_dev_get(dev);

return idev ? idev->nd_parms : ((void *)0);
}

void in6_dev_finish_destroy(struct inet6_dev *idev);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void in6_dev_put(struct inet6_dev *idev)
{
if (refcount_dec_and_test(&idev->refcnt))
in6_dev_finish_destroy(idev);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void in6_dev_put_clear(struct inet6_dev **pidev)
{
struct inet6_dev *idev = *pidev;

if (idev) {
in6_dev_put(idev);
*pidev = ((void *)0);
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __in6_dev_put(struct inet6_dev *idev)
{
refcount_dec(&idev->refcnt);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void in6_dev_hold(struct inet6_dev *idev)
{
refcount_inc(&idev->refcnt);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ip6_ignore_linkdown(const struct net_device *dev)
{
const struct inet6_dev *idev = __in6_dev_get(dev);

return !!idev->cnf.ignore_routes_with_linkdown;
}

void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void in6_ifa_put(struct inet6_ifaddr *ifp)
{
if (refcount_dec_and_test(&ifp->refcnt))
inet6_ifa_finish_destroy(ifp);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __in6_ifa_put(struct inet6_ifaddr *ifp)
{
refcount_dec(&ifp->refcnt);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void in6_ifa_hold(struct inet6_ifaddr *ifp)
{
refcount_inc(&ifp->refcnt);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void addrconf_addr_solict_mult(const struct in6_addr *addr,
struct in6_addr *solicited)
{
ipv6_addr_set(solicited,
(( __be32)(__builtin_constant_p((__u32)((0xFF020000))) ? ((__u32)( (((__u32)((0xFF020000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xFF020000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xFF020000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xFF020000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xFF020000)))), 0,
(( __be32)(__builtin_constant_p((__u32)((0x1))) ? ((__u32)( (((__u32)((0x1)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x1)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x1)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x1)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x1)))),
(( __be32)(__builtin_constant_p((__u32)((0xFF000000))) ? ((__u32)( (((__u32)((0xFF000000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xFF000000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xFF000000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xFF000000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xFF000000)))) | addr->in6_u.u6_addr32[3]);
}
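
/* Each '__builtin_constant_p(x) ? ... : __fswab32(x)' expression here and
 * in the ipv6_addr_is_*() helpers below is the expansion of htonl() /
 * cpu_to_be32() on this little-endian target: constants are byte-swapped
 * at compile time, anything else through __fswab32().
 */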

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr)
{




return ((addr->in6_u.u6_addr32[0] ^ (( __be32)(__builtin_constant_p((__u32)((0xff020000))) ? ((__u32)( (((__u32)((0xff020000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xff020000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xff020000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xff020000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xff020000))))) |
addr->in6_u.u6_addr32[1] | addr->in6_u.u6_addr32[2] |
(addr->in6_u.u6_addr32[3] ^ (( __be32)(__builtin_constant_p((__u32)((0x00000001))) ? ((__u32)( (((__u32)((0x00000001)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x00000001)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x00000001)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x00000001)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x00000001)))))) == 0;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_addr_is_ll_all_routers(const struct in6_addr *addr)
{




return ((addr->in6_u.u6_addr32[0] ^ (( __be32)(__builtin_constant_p((__u32)((0xff020000))) ? ((__u32)( (((__u32)((0xff020000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xff020000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xff020000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xff020000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xff020000))))) |
addr->in6_u.u6_addr32[1] | addr->in6_u.u6_addr32[2] |
(addr->in6_u.u6_addr32[3] ^ (( __be32)(__builtin_constant_p((__u32)((0x00000002))) ? ((__u32)( (((__u32)((0x00000002)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x00000002)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x00000002)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x00000002)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x00000002)))))) == 0;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_addr_is_isatap(const struct in6_addr *addr)
{
return (addr->in6_u.u6_addr32[2] | (( __be32)(__builtin_constant_p((__u32)((0x02000000))) ? ((__u32)( (((__u32)((0x02000000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x02000000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x02000000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x02000000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x02000000))))) == (( __be32)(__builtin_constant_p((__u32)((0x02005EFE))) ? ((__u32)( (((__u32)((0x02005EFE)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x02005EFE)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x02005EFE)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x02005EFE)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x02005EFE))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_addr_is_solict_mult(const struct in6_addr *addr)
{






return ((addr->in6_u.u6_addr32[0] ^ (( __be32)(__builtin_constant_p((__u32)((0xff020000))) ? ((__u32)( (((__u32)((0xff020000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xff020000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xff020000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xff020000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xff020000))))) |
addr->in6_u.u6_addr32[1] |
(addr->in6_u.u6_addr32[2] ^ (( __be32)(__builtin_constant_p((__u32)((0x00000001))) ? ((__u32)( (((__u32)((0x00000001)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x00000001)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x00000001)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x00000001)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x00000001))))) |
(addr->in6_u.u6_addr8[12] ^ 0xff)) == 0;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_addr_is_all_snoopers(const struct in6_addr *addr)
{






return ((addr->in6_u.u6_addr32[0] ^ (( __be32)(__builtin_constant_p((__u32)((0xff020000))) ? ((__u32)( (((__u32)((0xff020000)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xff020000)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xff020000)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xff020000)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xff020000))))) |
addr->in6_u.u6_addr32[1] | addr->in6_u.u6_addr32[2] |
(addr->in6_u.u6_addr32[3] ^ (( __be32)(__builtin_constant_p((__u32)((0x0000006a))) ? ((__u32)( (((__u32)((0x0000006a)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x0000006a)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x0000006a)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x0000006a)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x0000006a)))))) == 0;

}


int if6_proc_init(void);
void if6_proc_exit(void);
# 23 "./include/net/ip6_route.h" 2



# 1 "./include/net/lwtunnel.h" 1




# 1 "./include/uapi/linux/lwtunnel.h" 1






enum lwtunnel_encap_types {
LWTUNNEL_ENCAP_NONE,
LWTUNNEL_ENCAP_MPLS,
LWTUNNEL_ENCAP_IP,
LWTUNNEL_ENCAP_ILA,
LWTUNNEL_ENCAP_IP6,
LWTUNNEL_ENCAP_SEG6,
LWTUNNEL_ENCAP_BPF,
LWTUNNEL_ENCAP_SEG6_LOCAL,
LWTUNNEL_ENCAP_RPL,
LWTUNNEL_ENCAP_IOAM6,
__LWTUNNEL_ENCAP_MAX,
};



enum lwtunnel_ip_t {
LWTUNNEL_IP_UNSPEC,
LWTUNNEL_IP_ID,
LWTUNNEL_IP_DST,
LWTUNNEL_IP_SRC,
LWTUNNEL_IP_TTL,
LWTUNNEL_IP_TOS,
LWTUNNEL_IP_FLAGS,
LWTUNNEL_IP_PAD,
LWTUNNEL_IP_OPTS,
__LWTUNNEL_IP_MAX,
};



enum lwtunnel_ip6_t {
LWTUNNEL_IP6_UNSPEC,
LWTUNNEL_IP6_ID,
LWTUNNEL_IP6_DST,
LWTUNNEL_IP6_SRC,
LWTUNNEL_IP6_HOPLIMIT,
LWTUNNEL_IP6_TC,
LWTUNNEL_IP6_FLAGS,
LWTUNNEL_IP6_PAD,
LWTUNNEL_IP6_OPTS,
__LWTUNNEL_IP6_MAX,
};



enum {
LWTUNNEL_IP_OPTS_UNSPEC,
LWTUNNEL_IP_OPTS_GENEVE,
LWTUNNEL_IP_OPTS_VXLAN,
LWTUNNEL_IP_OPTS_ERSPAN,
__LWTUNNEL_IP_OPTS_MAX,
};



enum {
LWTUNNEL_IP_OPT_GENEVE_UNSPEC,
LWTUNNEL_IP_OPT_GENEVE_CLASS,
LWTUNNEL_IP_OPT_GENEVE_TYPE,
LWTUNNEL_IP_OPT_GENEVE_DATA,
__LWTUNNEL_IP_OPT_GENEVE_MAX,
};



enum {
LWTUNNEL_IP_OPT_VXLAN_UNSPEC,
LWTUNNEL_IP_OPT_VXLAN_GBP,
__LWTUNNEL_IP_OPT_VXLAN_MAX,
};



enum {
LWTUNNEL_IP_OPT_ERSPAN_UNSPEC,
LWTUNNEL_IP_OPT_ERSPAN_VER,
LWTUNNEL_IP_OPT_ERSPAN_INDEX,
LWTUNNEL_IP_OPT_ERSPAN_DIR,
LWTUNNEL_IP_OPT_ERSPAN_HWID,
__LWTUNNEL_IP_OPT_ERSPAN_MAX,
};



enum {
LWT_BPF_PROG_UNSPEC,
LWT_BPF_PROG_FD,
LWT_BPF_PROG_NAME,
__LWT_BPF_PROG_MAX,
};



enum {
LWT_BPF_UNSPEC,
LWT_BPF_IN,
LWT_BPF_OUT,
LWT_BPF_XMIT,
LWT_BPF_XMIT_HEADROOM,
__LWT_BPF_MAX,
};
# 6 "./include/net/lwtunnel.h" 2



# 1 "./include/net/route.h" 1
# 28 "./include/net/route.h"
# 1 "./include/net/arp.h" 1
# 11 "./include/net/arp.h"
extern struct neigh_table arp_tbl;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32 *hash_rnd)
{
u32 key = *(const u32 *)pkey;
u32 val = key ^ hash32_ptr(dev);

return val * hash_rnd[0];
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
{
if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
key = ((unsigned long int) 0x00000000);

return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
}
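
/* ((unsigned long int) 0x00000000) is the expansion of INADDR_ANY;
 * loopback and point-to-point devices presumably need only one neighbour
 * entry, so lookups on them are keyed on 0.0.0.0.
 */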
# 37 "./include/net/arp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key)
{
struct neighbour *n;

rcu_read_lock_bh();
n = __ipv4_neigh_lookup_noref(dev, key);
if (n && !refcount_inc_not_zero(&n->refcnt))
n = ((void *)0);
rcu_read_unlock_bh();

return n;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __ipv4_confirm_neigh(struct net_device *dev, u32 key)
{
struct neighbour *n;

rcu_read_lock_bh();
n = __ipv4_neigh_lookup_noref(dev, key);
neigh_confirm(n);
rcu_read_unlock_bh();
}

void arp_init(void);
int arp_ioctl(struct net *net, unsigned int cmd, void *arg);
void arp_send(int type, int ptype, __be32 dest_ip,
struct net_device *dev, __be32 src_ip,
const unsigned char *dest_hw,
const unsigned char *src_hw, const unsigned char *th);
int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir);
void arp_ifdown(struct net_device *dev);
int arp_invalidate(struct net_device *dev, __be32 ip, bool force);

struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
struct net_device *dev, __be32 src_ip,
const unsigned char *dest_hw,
const unsigned char *src_hw,
const unsigned char *target_hw);
void arp_xmit(struct sk_buff *skb);
# 29 "./include/net/route.h" 2
# 1 "./include/net/ndisc.h" 1




# 1 "./include/net/ipv6_stubs.h" 1
# 14 "./include/net/ipv6_stubs.h"
struct fib6_info;
struct fib6_nh;
struct fib6_config;
struct fib6_result;




struct ipv6_stub {
int (*ipv6_sock_mc_join)(struct sock *sk, int ifindex,
const struct in6_addr *addr);
int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
const struct in6_addr *addr);
struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net,
const struct sock *sk,
struct flowi6 *fl6,
const struct in6_addr *final_dst);
int (*ipv6_route_input)(struct sk_buff *skb);

struct fib6_table *(*fib6_get_table)(struct net *net, u32 id);
int (*fib6_lookup)(struct net *net, int oif, struct flowi6 *fl6,
struct fib6_result *res, int flags);
int (*fib6_table_lookup)(struct net *net, struct fib6_table *table,
int oif, struct flowi6 *fl6,
struct fib6_result *res, int flags);
void (*fib6_select_path)(const struct net *net, struct fib6_result *res,
struct flowi6 *fl6, int oif, bool oif_match,
const struct sk_buff *skb, int strict);
u32 (*ip6_mtu_from_fib6)(const struct fib6_result *res,
const struct in6_addr *daddr,
const struct in6_addr *saddr);

int (*fib6_nh_init)(struct net *net, struct fib6_nh *fib6_nh,
struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
void (*fib6_nh_release)(struct fib6_nh *fib6_nh);
void (*fib6_nh_release_dsts)(struct fib6_nh *fib6_nh);
void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt);
int (*ip6_del_rt)(struct net *net, struct fib6_info *rt, bool skip_notify);
void (*fib6_rt_update)(struct net *net, struct fib6_info *rt,
struct nl_info *info);

void (*udpv6_encap_enable)(void);
void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr,
const struct in6_addr *solicited_addr,
bool router, bool solicited, bool override, bool inc_opt);






struct neigh_table *nd_tbl;

int (*ipv6_fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *));
struct net_device *(*ipv6_dev_find)(struct net *net, const struct in6_addr *addr,
struct net_device *dev);
};
extern const struct ipv6_stub *ipv6_stub ;


struct ipv6_bpf_stub {
int (*inet6_bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len,
u32 flags);
struct sock *(*udp6_lib_lookup)(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif, int sdif, struct udp_table *tbl,
struct sk_buff *skb);
};
extern const struct ipv6_bpf_stub *ipv6_bpf_stub ;
# 6 "./include/net/ndisc.h" 2
# 30 "./include/net/ndisc.h"
enum {
__ND_OPT_PREFIX_INFO_END = 0,
ND_OPT_SOURCE_LL_ADDR = 1,
ND_OPT_TARGET_LL_ADDR = 2,
ND_OPT_PREFIX_INFO = 3,
ND_OPT_REDIRECT_HDR = 4,
ND_OPT_MTU = 5,
ND_OPT_NONCE = 14,
__ND_OPT_ARRAY_MAX,
ND_OPT_ROUTE_INFO = 24,
ND_OPT_RDNSS = 25,
ND_OPT_DNSSL = 31,
ND_OPT_6CO = 34,
ND_OPT_CAPTIVE_PORTAL = 37,
ND_OPT_PREF64 = 38,
__ND_OPT_MAX
};
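
/* The gaps in these values follow the IANA-assigned IPv6 Neighbor
 * Discovery option type numbers; only the densely numbered options below
 * __ND_OPT_ARRAY_MAX are stored in ndisc_options::nd_opt_array.
 */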








# 1 "./include/linux/icmpv6.h" 1








static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
{
return (struct icmp6hdr *)skb_transport_header(skb);
}





typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
const struct in6_addr *force_saddr,
const struct inet6_skb_parm *parm);
void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
const struct in6_addr *force_saddr,
const struct inet6_skb_parm *parm);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
const struct inet6_skb_parm *parm)
{
icmp6_send(skb, type, code, info, ((void *)0), parm);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
{
do { __attribute__((__noreturn__)) extern void __compiletime_assert_471(void) ; if (!(!(fn != icmp6_send))) __compiletime_assert_471(); } while (0);
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
{
do { __attribute__((__noreturn__)) extern void __compiletime_assert_472(void) ; if (!(!(fn != icmp6_send))) __compiletime_assert_472(); } while (0);
return 0;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
{
__icmpv6_send(skb, type, code, info, ((struct inet6_skb_parm*)((skb)->cb)));
}

int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
unsigned int data_len);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
{
struct inet6_skb_parm parm = { 0 };
__icmpv6_send(skb_in, type, code, info, &parm);
}
# 78 "./include/linux/icmpv6.h"
extern int icmpv6_init(void);
extern int icmpv6_err_convert(u8 type, u8 code,
int *err);
extern void icmpv6_cleanup(void);
extern void icmpv6_param_prob(struct sk_buff *skb,
u8 code, int pos);

struct flowi6;
struct in6_addr;
extern void icmpv6_flow_init(struct sock *sk,
struct flowi6 *fl6,
u8 type,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
int oif);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool icmpv6_is_err(int type)
{
switch (type) {
case 1:
case 2:
case 3:
case 4:
return true;
}

return false;
}
# 55 "./include/net/ndisc.h" 2
# 72 "./include/net/ndisc.h"
struct ctl_table;
struct inet6_dev;
struct net_device;
struct net_proto_family;
struct sk_buff;
struct prefix_info;

extern struct neigh_table nd_tbl;

struct nd_msg {
struct icmp6hdr icmph;
struct in6_addr target;
__u8 opt[];
};

struct rs_msg {
struct icmp6hdr icmph;
__u8 opt[];
};

struct ra_msg {
struct icmp6hdr icmph;
__be32 reachable_time;
__be32 retrans_timer;
};

struct rd_msg {
struct icmp6hdr icmph;
struct in6_addr target;
struct in6_addr dest;
__u8 opt[];
};

struct nd_opt_hdr {
__u8 nd_opt_type;
__u8 nd_opt_len;
} __attribute__((__packed__));


struct ndisc_options {
struct nd_opt_hdr *nd_opt_array[__ND_OPT_ARRAY_MAX];




struct nd_opt_hdr *nd_useropts;
struct nd_opt_hdr *nd_useropts_end;



};
# 136 "./include/net/ndisc.h"
struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
u8 *opt, int opt_len,
struct ndisc_options *ndopts);

void __ndisc_fill_addr_option(struct sk_buff *skb, int type, const void *data,
int data_len, int pad);
# 202 "./include/net/ndisc.h"
struct ndisc_ops {
int (*is_useropt)(u8 nd_opt_type);
int (*parse_options)(const struct net_device *dev,
struct nd_opt_hdr *nd_opt,
struct ndisc_options *ndopts);
void (*update)(const struct net_device *dev, struct neighbour *n,
u32 flags, u8 icmp6_type,
const struct ndisc_options *ndopts);
int (*opt_addr_space)(const struct net_device *dev, u8 icmp6_type,
struct neighbour *neigh, u8 *ha_buf,
u8 **ha);
void (*fill_addr_option)(const struct net_device *dev,
struct sk_buff *skb, u8 icmp6_type,
const u8 *ha);
void (*prefix_rcv_add_addr)(struct net *net, struct net_device *dev,
const struct prefix_info *pinfo,
struct inet6_dev *in6_dev,
struct in6_addr *addr,
int addr_type, u32 addr_flags,
bool sllao, bool tokenized,
__u32 valid_lft, u32 prefered_lft,
bool dev_addr_generated);
};


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ndisc_ops_is_useropt(const struct net_device *dev,
u8 nd_opt_type)
{
if (dev->ndisc_ops && dev->ndisc_ops->is_useropt)
return dev->ndisc_ops->is_useropt(nd_opt_type);
else
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ndisc_ops_parse_options(const struct net_device *dev,
struct nd_opt_hdr *nd_opt,
struct ndisc_options *ndopts)
{
if (dev->ndisc_ops && dev->ndisc_ops->parse_options)
return dev->ndisc_ops->parse_options(dev, nd_opt, ndopts);
else
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ndisc_ops_update(const struct net_device *dev,
struct neighbour *n, u32 flags,
u8 icmp6_type,
const struct ndisc_options *ndopts)
{
if (dev->ndisc_ops && dev->ndisc_ops->update)
dev->ndisc_ops->update(dev, n, flags, icmp6_type, ndopts);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ndisc_ops_opt_addr_space(const struct net_device *dev,
u8 icmp6_type)
{
if (dev->ndisc_ops && dev->ndisc_ops->opt_addr_space &&
icmp6_type != 137)
return dev->ndisc_ops->opt_addr_space(dev, icmp6_type, ((void *)0),
((void *)0), ((void *)0));
else
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ndisc_ops_redirect_opt_addr_space(const struct net_device *dev,
struct neighbour *neigh,
u8 *ha_buf, u8 **ha)
{
if (dev->ndisc_ops && dev->ndisc_ops->opt_addr_space)
return dev->ndisc_ops->opt_addr_space(dev, 137,
neigh, ha_buf, ha);
else
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ndisc_ops_fill_addr_option(const struct net_device *dev,
struct sk_buff *skb,
u8 icmp6_type)
{
if (dev->ndisc_ops && dev->ndisc_ops->fill_addr_option &&
icmp6_type != 137)
dev->ndisc_ops->fill_addr_option(dev, skb, icmp6_type, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ndisc_ops_fill_redirect_addr_option(const struct net_device *dev,
struct sk_buff *skb,
const u8 *ha)
{
if (dev->ndisc_ops && dev->ndisc_ops->fill_addr_option)
dev->ndisc_ops->fill_addr_option(dev, skb, 137, ha);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ndisc_ops_prefix_rcv_add_addr(struct net *net,
struct net_device *dev,
const struct prefix_info *pinfo,
struct inet6_dev *in6_dev,
struct in6_addr *addr,
int addr_type, u32 addr_flags,
bool sllao, bool tokenized,
__u32 valid_lft,
u32 prefered_lft,
bool dev_addr_generated)
{
if (dev->ndisc_ops && dev->ndisc_ops->prefix_rcv_add_addr)
dev->ndisc_ops->prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
addr, addr_type,
addr_flags, sllao,
tokenized, valid_lft,
prefered_lft,
dev_addr_generated);
}
# 321 "./include/net/ndisc.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ndisc_addr_option_pad(unsigned short type)
{
switch (type) {
case 32: return 2;
default: return 0;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __ndisc_opt_addr_space(unsigned char addr_len, int pad)
{
return (((addr_len + pad)+2+7)&~7);
}
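
/* ND option space is the address length plus padding plus 2 bytes of
 * type/length header, rounded up to the next multiple of 8 octets as
 * RFC 4861 options require.
 */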


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ndisc_opt_addr_space(struct net_device *dev, u8 icmp6_type)
{
return __ndisc_opt_addr_space(dev->addr_len,
ndisc_addr_option_pad(dev->type)) +
ndisc_ops_opt_addr_space(dev, icmp6_type);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ndisc_redirect_opt_addr_space(struct net_device *dev,
struct neighbour *neigh,
u8 *ops_data_buf,
u8 **ops_data)
{
return __ndisc_opt_addr_space(dev->addr_len,
ndisc_addr_option_pad(dev->type)) +
ndisc_ops_redirect_opt_addr_space(dev, neigh, ops_data_buf,
ops_data);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u8 *__ndisc_opt_addr_data(struct nd_opt_hdr *p,
unsigned char addr_len, int prepad)
{
u8 *lladdr = (u8 *)(p + 1);
int lladdrlen = p->nd_opt_len << 3;
if (lladdrlen != __ndisc_opt_addr_space(addr_len, prepad))
return ((void *)0);
return lladdr + prepad;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p,
struct net_device *dev)
{
return __ndisc_opt_addr_data(p, dev->addr_len,
ndisc_addr_option_pad(dev->type));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, __u32 *hash_rnd)
{
const u32 *p32 = pkey;

return (((p32[0] ^ hash32_ptr(dev)) * hash_rnd[0]) +
(p32[1] * hash_rnd[1]) +
(p32[2] * hash_rnd[2]) +
(p32[3] * hash_rnd[3]));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct neighbour *__ipv6_neigh_lookup_noref(struct net_device *dev, const void *pkey)
{
return ___neigh_lookup_noref(&nd_tbl, neigh_key_eq128, ndisc_hashfn, pkey, dev);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct neighbour *__ipv6_neigh_lookup_noref_stub(struct net_device *dev,
const void *pkey)
{
return ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128,
ndisc_hashfn, pkey, dev);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, const void *pkey)
{
struct neighbour *n;

rcu_read_lock_bh();
n = __ipv6_neigh_lookup_noref(dev, pkey);
if (n && !refcount_inc_not_zero(&n->refcnt))
n = ((void *)0);
rcu_read_unlock_bh();

return n;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __ipv6_confirm_neigh(struct net_device *dev,
const void *pkey)
{
struct neighbour *n;

rcu_read_lock_bh();
n = __ipv6_neigh_lookup_noref(dev, pkey);
neigh_confirm(n);
rcu_read_unlock_bh();
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __ipv6_confirm_neigh_stub(struct net_device *dev,
const void *pkey)
{
struct neighbour *n;

rcu_read_lock_bh();
n = __ipv6_neigh_lookup_noref_stub(dev, pkey);
neigh_confirm(n);
rcu_read_unlock_bh();
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct neighbour *ip_neigh_gw6(struct net_device *dev,
const void *addr)
{
struct neighbour *neigh;

neigh = __ipv6_neigh_lookup_noref_stub(dev, addr);
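/* __builtin_expect(!!(!neigh), 0) below is the expansion of unlikely(!neigh) */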
if (__builtin_expect(!!(!neigh), 0))
neigh = __neigh_create(ipv6_stub->nd_tbl, addr, dev, false);

return neigh;
}

int ndisc_init(void);
int ndisc_late_init(void);

void ndisc_late_cleanup(void);
void ndisc_cleanup(void);

int ndisc_rcv(struct sk_buff *skb);

struct sk_buff *ndisc_ns_create(struct net_device *dev, const struct in6_addr *solicit,
const struct in6_addr *saddr, u64 nonce);
void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
const struct in6_addr *daddr, const struct in6_addr *saddr,
u64 nonce);

void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
const struct in6_addr *saddr);

void ndisc_send_rs(struct net_device *dev,
const struct in6_addr *saddr, const struct in6_addr *daddr);
void ndisc_send_na(struct net_device *dev, const struct in6_addr *daddr,
const struct in6_addr *solicited_addr,
bool router, bool solicited, bool override, bool inc_opt);

void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target);

int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev,
int dir);

void ndisc_update(const struct net_device *dev, struct neighbour *neigh,
const u8 *lladdr, u8 new, u32 flags, u8 icmp6_type,
struct ndisc_options *ndopts);




int igmp6_init(void);
int igmp6_late_init(void);

void igmp6_cleanup(void);
void igmp6_late_cleanup(void);

void igmp6_event_query(struct sk_buff *skb);

void igmp6_event_report(struct sk_buff *skb);



int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl,
void *oldval, size_t *oldlenp,
void *newval, size_t newlen);


void inet6_ifinfo_notify(int event, struct inet6_dev *idev);
# 30 "./include/net/route.h" 2
# 1 "./include/uapi/linux/in_route.h" 1
# 31 "./include/net/route.h" 2



# 1 "./include/linux/ip.h" 1
# 17 "./include/linux/ip.h"
# 1 "./include/uapi/linux/ip.h" 1
# 86 "./include/uapi/linux/ip.h"
struct iphdr {

__u8 ihl:4,
version:4;
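/* (the blank run below is most likely where the big-endian bitfield
 * ordering of ihl/version was compiled out by the preprocessor) */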






__u8 tos;
__be16 tot_len;
__be16 id;
__be16 frag_off;
__u8 ttl;
__u8 protocol;
__sum16 check;
__be32 saddr;
__be32 daddr;

};


struct ip_auth_hdr {
__u8 nexthdr;
__u8 hdrlen;
__be16 reserved;
__be32 spi;
__be32 seq_no;
__u8 auth_data[0];
};

struct ip_esp_hdr {
__be32 spi;
__be32 seq_no;
__u8 enc_data[0];
};

struct ip_comp_hdr {
__u8 nexthdr;
__u8 flags;
__be16 cpi;
};

struct ip_beet_phdr {
__u8 nexthdr;
__u8 hdrlen;
__u8 padlen;
__u8 reserved;
};


enum
{
IPV4_DEVCONF_FORWARDING=1,
IPV4_DEVCONF_MC_FORWARDING,
IPV4_DEVCONF_PROXY_ARP,
IPV4_DEVCONF_ACCEPT_REDIRECTS,
IPV4_DEVCONF_SECURE_REDIRECTS,
IPV4_DEVCONF_SEND_REDIRECTS,
IPV4_DEVCONF_SHARED_MEDIA,
IPV4_DEVCONF_RP_FILTER,
IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
IPV4_DEVCONF_BOOTP_RELAY,
IPV4_DEVCONF_LOG_MARTIANS,
IPV4_DEVCONF_TAG,
IPV4_DEVCONF_ARPFILTER,
IPV4_DEVCONF_MEDIUM_ID,
IPV4_DEVCONF_NOXFRM,
IPV4_DEVCONF_NOPOLICY,
IPV4_DEVCONF_FORCE_IGMP_VERSION,
IPV4_DEVCONF_ARP_ANNOUNCE,
IPV4_DEVCONF_ARP_IGNORE,
IPV4_DEVCONF_PROMOTE_SECONDARIES,
IPV4_DEVCONF_ARP_ACCEPT,
IPV4_DEVCONF_ARP_NOTIFY,
IPV4_DEVCONF_ACCEPT_LOCAL,
IPV4_DEVCONF_SRC_VMARK,
IPV4_DEVCONF_PROXY_ARP_PVLAN,
IPV4_DEVCONF_ROUTE_LOCALNET,
IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL,
IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL,
IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
IPV4_DEVCONF_DROP_GRATUITOUS_ARP,
IPV4_DEVCONF_BC_FORWARDING,
IPV4_DEVCONF_ARP_EVICT_NOCARRIER,
__IPV4_DEVCONF_MAX
};
# 18 "./include/linux/ip.h" 2

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct iphdr *ip_hdr(const struct sk_buff *skb)
{
return (struct iphdr *)skb_network_header(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct iphdr *inner_ip_hdr(const struct sk_buff *skb)
{
return (struct iphdr *)skb_inner_network_header(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct iphdr *ipip_hdr(const struct sk_buff *skb)
{
return (struct iphdr *)skb_transport_header(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int ip_transport_len(const struct sk_buff *skb)
{
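/* the expression below appears to be the expansion of
 * ntohs(ip_hdr(skb)->tot_len) - skb_network_header_len(skb),
 * i.e. a constant-foldable byte swap on this little-endian target */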
return (__builtin_constant_p((__u16)(( __u16)(__be16)(ip_hdr(skb)->tot_len))) ? ((__u16)( (((__u16)(( __u16)(__be16)(ip_hdr(skb)->tot_len)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(ip_hdr(skb)->tot_len)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(ip_hdr(skb)->tot_len))) - skb_network_header_len(skb);
}
# 35 "./include/net/route.h" 2
# 46 "./include/net/route.h"
struct ip_tunnel_info;
struct fib_nh;
struct fib_info;
struct uncached_list;
struct rtable {
struct dst_entry dst;

int rt_genid;
unsigned int rt_flags;
__u16 rt_type;
__u8 rt_is_input;
__u8 rt_uses_gateway;

int rt_iif;

u8 rt_gw_family;

union {
__be32 rt_gw4;
struct in6_addr rt_gw6;
};


u32 rt_mtu_locked:1,
rt_pmtu:31;

struct list_head rt_uncached;
struct uncached_list *rt_uncached_list;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool rt_is_input_route(const struct rtable *rt)
{
return rt->rt_is_input != 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool rt_is_output_route(const struct rtable *rt)
{
return rt->rt_is_input == 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be32 rt_nexthop(const struct rtable *rt, __be32 daddr)
{
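/* rt_gw_family == 2 is the expanded AF_INET */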
if (rt->rt_gw_family == 2)
return rt->rt_gw4;
return daddr;
}

struct ip_rt_acct {
__u32 o_bytes;
__u32 o_packets;
__u32 i_bytes;
__u32 i_packets;
};

struct rt_cache_stat {
unsigned int in_slow_tot;
unsigned int in_slow_mc;
unsigned int in_no_route;
unsigned int in_brd;
unsigned int in_martian_dst;
unsigned int in_martian_src;
unsigned int out_slow_tot;
unsigned int out_slow_mc;
};

extern struct ip_rt_acct *ip_rt_acct;

struct in_device;

int ip_rt_init(void);
void rt_cache_flush(struct net *net);
void rt_flush_dev(struct net_device *dev);
struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *flp,
const struct sk_buff *skb);
struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *flp,
struct fib_result *res,
const struct sk_buff *skb);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct rtable *__ip_route_output_key(struct net *net,
struct flowi4 *flp)
{
return ip_route_output_key_hash(net, flp, ((void *)0));
}

struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
const struct sock *sk);
struct rtable *ip_route_output_tunnel(struct sk_buff *skb,
struct net_device *dev,
struct net *net, __be32 *saddr,
const struct ip_tunnel_info *info,
u8 protocol, bool use_cache);

struct dst_entry *ipv4_blackhole_route(struct net *net,
struct dst_entry *dst_orig);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct rtable *ip_route_output_key(struct net *net, struct flowi4 *flp)
{
return ip_route_output_flow(net, flp, ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct rtable *ip_route_output(struct net *net, __be32 daddr,
__be32 saddr, u8 tos, int oif)
{
struct flowi4 fl4 = {
.__fl_common.flowic_oif = oif,
.__fl_common.flowic_tos = tos,
.daddr = daddr,
.saddr = saddr,
};
return ip_route_output_key(net, &fl4);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct rtable *ip_route_output_ports(struct net *net, struct flowi4 *fl4,
struct sock *sk,
__be32 daddr, __be32 saddr,
__be16 dport, __be16 sport,
__u8 proto, __u8 tos, int oif)
{
flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos,
RT_SCOPE_UNIVERSE, proto,
sk ? inet_sk_flowi_flags(sk) : 0,
daddr, saddr, dport, sport, sock_net_uid(net, sk));
if (sk)
security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
return ip_route_output_flow(net, fl4, sk);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct rtable *ip_route_output_gre(struct net *net, struct flowi4 *fl4,
__be32 daddr, __be32 saddr,
__be32 gre_key, __u8 tos, int oif)
{
memset(fl4, 0, sizeof(*fl4));
fl4->__fl_common.flowic_oif = oif;
fl4->daddr = daddr;
fl4->saddr = saddr;
fl4->__fl_common.flowic_tos = tos;
fl4->__fl_common.flowic_proto = IPPROTO_GRE;
fl4->uli.gre_key = gre_key;
return ip_route_output_key(net, fl4);
}
int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev,
struct in_device *in_dev, u32 *itag);
int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin);
int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin,
struct fib_result *res);

int ip_route_use_hint(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin,
const struct sk_buff *hint);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin)
{
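/* the -22 below is the expanded -EINVAL */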
int err;

rcu_read_lock();
err = ip_route_input_noref(skb, dst, src, tos, devin);
if (!err) {
skb_dst_force(skb);
if (!skb_dst(skb))
err = -22;
}
rcu_read_unlock();

return err;
}

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif,
u8 protocol);
void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu);
void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u8 protocol);
void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
void ip_rt_send_redirect(struct sk_buff *skb);

unsigned int inet_addr_type(struct net *net, __be32 addr);
unsigned int inet_addr_type_table(struct net *net, __be32 addr, u32 tb_id);
unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
__be32 addr);
unsigned int inet_addr_type_dev_table(struct net *net,
const struct net_device *dev,
__be32 addr);
void ip_rt_multicast_event(struct in_device *);
int ip_rt_ioctl(struct net *, unsigned int cmd, struct rtentry *rt);
void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
struct rtable *rt_dst_alloc(struct net_device *dev,
unsigned int flags, u16 type,
bool nopolicy, bool noxfrm);
struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt);

struct in_ifaddr;
void fib_add_ifaddr(struct in_ifaddr *);
void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
void fib_modify_prefix_metric(struct in_ifaddr *ifa, u32 new_metric);

void rt_add_uncached_list(struct rtable *rt);
void rt_del_uncached_list(struct rtable *rt);

int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
u32 table_id, struct fib_info *fi,
int *fa_index, int fa_start, unsigned int flags);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip_rt_put(struct rtable *rt)
{
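/* the compiletime_assert block below appears to expand from
 * BUILD_BUG_ON(offsetof(struct rtable, dst) != 0): dst must remain the
 * first member so &rt->dst can stand in for the rtable itself */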



do { __attribute__((__noreturn__)) extern void __compiletime_assert_473(void) ; if (!(!(__builtin_offsetof(struct rtable, dst) != 0))) __compiletime_assert_473(); } while (0);
dst_release(&rt->dst);
}



extern const __u8 ip_tos2prio[16];

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) char rt_tos2priority(u8 tos)
{
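/* ((tos) & 0x1E) >> 1 is the expanded IPTOS_TOS(tos) >> 1
 * (IPTOS_TOS_MASK == 0x1E) */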
return ip_tos2prio[((tos)&0x1E)>>1];
}
# 292 "./include/net/route.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32 src,
u32 tos, int oif, u8 protocol,
__be16 sport, __be16 dport,
struct sock *sk)
{
__u8 flow_flags = 0;
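/* the 0x01 below is the expanded FLOWI_FLAG_ANYSRC */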

if (inet_sk(sk)->transparent)
flow_flags |= 0x01;

flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
protocol, flow_flags, dst, src, dport, sport,
sk->sk_uid);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct rtable *ip_route_connect(struct flowi4 *fl4,
__be32 dst, __be32 src, u32 tos,
int oif, u8 protocol,
__be16 sport, __be16 dport,
struct sock *sk)
{
struct net *net = sock_net(sk);
struct rtable *rt;

ip_route_connect_init(fl4, dst, src, tos, oif, protocol,
sport, dport, sk);

if (!dst || !src) {
rt = __ip_route_output_key(net, fl4);
if (IS_ERR(rt))
return rt;
ip_rt_put(rt);
flowi4_update_output(fl4, oif, tos, fl4->daddr, fl4->saddr);
}
security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
return ip_route_output_flow(net, fl4, sk);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable *rt,
__be16 orig_sport, __be16 orig_dport,
__be16 sport, __be16 dport,
struct sock *sk)
{
if (sport != orig_sport || dport != orig_dport) {
fl4->uli.ports.dport = dport;
fl4->uli.ports.sport = sport;
ip_rt_put(rt);
flowi4_update_output(fl4, sk->__sk_common.skc_bound_dev_if,
(((inet_sk(sk)->tos)&0x1E) | sock_flag(sk, SOCK_LOCALROUTE)), fl4->daddr,
fl4->saddr);
security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
return ip_route_output_flow(sock_net(sk), fl4, sk);
}
return rt;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int inet_iif(const struct sk_buff *skb)
{
struct rtable *rt = skb_rtable(skb);

if (rt && rt->rt_iif)
return rt->rt_iif;

return skb->skb_iif;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ip4_dst_hoplimit(const struct dst_entry *dst)
{
int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
struct net *net = dev_net(dst->dev);

if (hoplimit == 0)
hoplimit = net->ipv4.sysctl_ip_default_ttl;
return hoplimit;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct neighbour *ip_neigh_gw4(struct net_device *dev,
__be32 daddr)
{
struct neighbour *neigh;

neigh = __ipv4_neigh_lookup_noref(dev, ( u32)daddr);
if (__builtin_expect(!!(!neigh), 0))
neigh = __neigh_create(&arp_tbl, &daddr, dev, false);

return neigh;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct neighbour *ip_neigh_for_gw(struct rtable *rt,
struct sk_buff *skb,
bool *is_v6gw)
{
struct net_device *dev = rt->dst.dev;
struct neighbour *neigh;
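/* gateway-family checks: 2 is the expanded AF_INET, 10 is AF_INET6;
 * __builtin_expect(..., 1) is the expansion of likely() */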

if (__builtin_expect(!!(rt->rt_gw_family == 2), 1)) {
neigh = ip_neigh_gw4(dev, rt->rt_gw4);
} else if (rt->rt_gw_family == 10) {
neigh = ip_neigh_gw6(dev, &rt->rt_gw6);
*is_v6gw = true;
} else {
neigh = ip_neigh_gw4(dev, ip_hdr(skb)->daddr);
}
return neigh;
}
# 10 "./include/net/lwtunnel.h" 2
# 19 "./include/net/lwtunnel.h"
enum {
LWTUNNEL_XMIT_DONE,
LWTUNNEL_XMIT_CONTINUE,
};


struct lwtunnel_state {
__u16 type;
__u16 flags;
__u16 headroom;
atomic_t refcnt;
int (*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb);
int (*orig_input)(struct sk_buff *);
struct callback_head rcu;
__u8 data[];
};

struct lwtunnel_encap_ops {
int (*build_state)(struct net *net, struct nlattr *encap,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts,
struct netlink_ext_ack *extack);
void (*destroy_state)(struct lwtunnel_state *lws);
int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
int (*input)(struct sk_buff *skb);
int (*fill_encap)(struct sk_buff *skb,
struct lwtunnel_state *lwtstate);
int (*get_encap_size)(struct lwtunnel_state *lwtstate);
int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
int (*xmit)(struct sk_buff *skb);

struct module *owner;
};
# 148 "./include/net/lwtunnel.h"
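/* From here to the end of the lwtunnel.h block these are the
 * !CONFIG_LWTUNNEL stub versions (see the extack message further down);
 * the recurring -95 is the expanded -EOPNOTSUPP. */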
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void lwtstate_free(struct lwtunnel_state *lws)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct lwtunnel_state *
lwtstate_get(struct lwtunnel_state *lws)
{
return lws;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void lwtstate_put(struct lwtunnel_state *lws)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool lwtunnel_xmit_redirect(struct lwtunnel_state *lwtstate)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void lwtunnel_set_redirect(struct dst_entry *dst)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int lwtunnel_headroom(struct lwtunnel_state *lwtstate,
unsigned int mtu)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
unsigned int num)
{
return -95;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
unsigned int num)
{
return -95;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int lwtunnel_valid_encap_type(u16 encap_type,
struct netlink_ext_ack *extack)
{
do { static const char __msg[] = "CONFIG_LWTUNNEL is not enabled in this kernel"; struct netlink_ext_ack *__extack = (extack); do_trace_netlink_extack(__msg); if (__extack) __extack->_msg = __msg; } while (0);
return -95;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len,
struct netlink_ext_ack *extack)
{



return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int lwtunnel_build_state(struct net *net, u16 encap_type,
struct nlattr *encap,
unsigned int family, const void *cfg,
struct lwtunnel_state **lws,
struct netlink_ext_ack *extack)
{
return -95;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int lwtunnel_fill_encap(struct sk_buff *skb,
struct lwtunnel_state *lwtstate,
int encap_attr, int encap_type_attr)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int lwtunnel_cmp_encap(struct lwtunnel_state *a,
struct lwtunnel_state *b)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
return -95;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int lwtunnel_input(struct sk_buff *skb)
{
return -95;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int lwtunnel_xmit(struct sk_buff *skb)
{
return -95;
}
# 27 "./include/net/ip6_route.h" 2



# 1 "./include/net/nexthop.h" 1
# 22 "./include/net/nexthop.h"
struct nexthop;

struct nh_config {
u32 nh_id;

u8 nh_family;
u8 nh_protocol;
u8 nh_blackhole;
u8 nh_fdb;
u32 nh_flags;

int nh_ifindex;
struct net_device *dev;

union {
__be32 ipv4;
struct in6_addr ipv6;
} gw;

struct nlattr *nh_grp;
u16 nh_grp_type;
u16 nh_grp_res_num_buckets;
unsigned long nh_grp_res_idle_timer;
unsigned long nh_grp_res_unbalanced_timer;
bool nh_grp_res_has_num_buckets;
bool nh_grp_res_has_idle_timer;
bool nh_grp_res_has_unbalanced_timer;

struct nlattr *nh_encap;
u16 nh_encap_type;

u32 nlflags;
struct nl_info nlinfo;
};

struct nh_info {
struct hlist_node dev_hash;
struct nexthop *nh_parent;

u8 family;
bool reject_nh;
bool fdb_nh;

union {
struct fib_nh_common fib_nhc;
struct fib_nh fib_nh;
struct fib6_nh fib6_nh;
};
};

struct nh_res_bucket {
struct nh_grp_entry *nh_entry;
atomic_long_t used_time;
unsigned long migrated_time;
bool occupied;
u8 nh_flags;
};

struct nh_res_table {
struct net *net;
u32 nhg_id;
struct delayed_work upkeep_dw;




struct list_head uw_nh_entries;
unsigned long unbalanced_since;

u32 idle_timer;
u32 unbalanced_timer;

u16 num_nh_buckets;
struct nh_res_bucket nh_buckets[];
};

struct nh_grp_entry {
struct nexthop *nh;
u8 weight;

union {
struct {
atomic_t upper_bound;
} hthr;
struct {

struct list_head uw_nh_entry;

u16 count_buckets;
u16 wants_buckets;
} res;
};

struct list_head nh_list;
struct nexthop *nh_parent;
};

struct nh_group {
struct nh_group *spare;
u16 num_nh;
bool is_multipath;
bool hash_threshold;
bool resilient;
bool fdb_nh;
bool has_v4;

struct nh_res_table *res_table;
struct nh_grp_entry nh_entries[];
};

struct nexthop {
struct rb_node rb_node;
struct list_head fi_list;
struct list_head f6i_list;
struct list_head fdb_list;
struct list_head grp_list;
struct net *net;

u32 id;

u8 protocol;
u8 nh_flags;
bool is_group;

refcount_t refcnt;
struct callback_head rcu;

union {
struct nh_info *nh_info;
struct nh_group *nh_grp;
};
};

enum nexthop_event_type {
NEXTHOP_EVENT_DEL,
NEXTHOP_EVENT_REPLACE,
NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
NEXTHOP_EVENT_BUCKET_REPLACE,
};

enum nh_notifier_info_type {
NH_NOTIFIER_INFO_TYPE_SINGLE,
NH_NOTIFIER_INFO_TYPE_GRP,
NH_NOTIFIER_INFO_TYPE_RES_TABLE,
NH_NOTIFIER_INFO_TYPE_RES_BUCKET,
};

struct nh_notifier_single_info {
struct net_device *dev;
u8 gw_family;
union {
__be32 ipv4;
struct in6_addr ipv6;
};
u8 is_reject:1,
is_fdb:1,
has_encap:1;
};

struct nh_notifier_grp_entry_info {
u8 weight;
u32 id;
struct nh_notifier_single_info nh;
};

struct nh_notifier_grp_info {
u16 num_nh;
bool is_fdb;
struct nh_notifier_grp_entry_info nh_entries[];
};

struct nh_notifier_res_bucket_info {
u16 bucket_index;
unsigned int idle_timer_ms;
bool force;
struct nh_notifier_single_info old_nh;
struct nh_notifier_single_info new_nh;
};

struct nh_notifier_res_table_info {
u16 num_nh_buckets;
struct nh_notifier_single_info nhs[];
};

struct nh_notifier_info {
struct net *net;
struct netlink_ext_ack *extack;
u32 id;
enum nh_notifier_info_type type;
union {
struct nh_notifier_single_info *nh;
struct nh_notifier_grp_info *nh_grp;
struct nh_notifier_res_table_info *nh_res_table;
struct nh_notifier_res_bucket_info *nh_res_bucket;
};
};

int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);
int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb);
void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap);
void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
bool offload, bool trap);
void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
unsigned long *activity);


struct nexthop *nexthop_find_by_id(struct net *net, u32 id);
void nexthop_free_rcu(struct callback_head *head);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool nexthop_get(struct nexthop *nh)
{
return refcount_inc_not_zero(&nh->refcnt);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void nexthop_put(struct nexthop *nh)
{
if (refcount_dec_and_test(&nh->refcnt))
call_rcu(&nh->rcu, nexthop_free_rcu);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool nexthop_cmp(const struct nexthop *nh1,
const struct nexthop *nh2)
{
return nh1 == nh2;
}
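/* The large ({ typeof(...) ... }) statement expressions from here on
 * appear to be expansions of the rcu_dereference() family (mostly
 * rcu_dereference_rtnl()) applied to nh->nh_grp / nh->nh_info: a
 * compile-time size check, a volatile READ_ONCE-style load, and a
 * compiled-out lockdep check that RTNL or the RCU read lock is held. */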

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool nexthop_is_fdb(const struct nexthop *nh)
{
if (nh->is_group) {
const struct nh_group *nh_grp;

nh_grp = ({ typeof(*(nh->nh_grp)) *__UNIQUE_ID_rcu474 = (typeof(*(nh->nh_grp)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_475(void) ; if (!((sizeof((nh->nh_grp)) == sizeof(char) || sizeof((nh->nh_grp)) == sizeof(short) || sizeof((nh->nh_grp)) == sizeof(int) || sizeof((nh->nh_grp)) == sizeof(long)) || sizeof((nh->nh_grp)) == sizeof(long long))) __compiletime_assert_475(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_grp)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_grp)))) *)&((nh->nh_grp))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))); ; ((typeof(*(nh->nh_grp)) *)(__UNIQUE_ID_rcu474)); });
return nh_grp->fdb_nh;
} else {
const struct nh_info *nhi;

nhi = ({ typeof(*(nh->nh_info)) *__UNIQUE_ID_rcu476 = (typeof(*(nh->nh_info)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_477(void) ; if (!((sizeof((nh->nh_info)) == sizeof(char) || sizeof((nh->nh_info)) == sizeof(short) || sizeof((nh->nh_info)) == sizeof(int) || sizeof((nh->nh_info)) == sizeof(long)) || sizeof((nh->nh_info)) == sizeof(long long))) __compiletime_assert_477(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_info)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_info)))) *)&((nh->nh_info))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))); ; ((typeof(*(nh->nh_info)) *)(__UNIQUE_ID_rcu476)); });
return nhi->fdb_nh;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool nexthop_has_v4(const struct nexthop *nh)
{
if (nh->is_group) {
struct nh_group *nh_grp;

nh_grp = ({ typeof(*(nh->nh_grp)) *__UNIQUE_ID_rcu478 = (typeof(*(nh->nh_grp)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_479(void) ; if (!((sizeof((nh->nh_grp)) == sizeof(char) || sizeof((nh->nh_grp)) == sizeof(short) || sizeof((nh->nh_grp)) == sizeof(int) || sizeof((nh->nh_grp)) == sizeof(long)) || sizeof((nh->nh_grp)) == sizeof(long long))) __compiletime_assert_479(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_grp)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_grp)))) *)&((nh->nh_grp))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))); ; ((typeof(*(nh->nh_grp)) *)(__UNIQUE_ID_rcu478)); });
return nh_grp->has_v4;
}
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool nexthop_is_multipath(const struct nexthop *nh)
{
if (nh->is_group) {
struct nh_group *nh_grp;

nh_grp = ({ typeof(*(nh->nh_grp)) *__UNIQUE_ID_rcu480 = (typeof(*(nh->nh_grp)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_481(void) ; if (!((sizeof((nh->nh_grp)) == sizeof(char) || sizeof((nh->nh_grp)) == sizeof(short) || sizeof((nh->nh_grp)) == sizeof(int) || sizeof((nh->nh_grp)) == sizeof(long)) || sizeof((nh->nh_grp)) == sizeof(long long))) __compiletime_assert_481(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_grp)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_grp)))) *)&((nh->nh_grp))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))); ; ((typeof(*(nh->nh_grp)) *)(__UNIQUE_ID_rcu480)); });
return nh_grp->is_multipath;
}
return false;
}

struct nexthop *nexthop_select_path(struct nexthop *nh, int hash);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int nexthop_num_path(const struct nexthop *nh)
{
unsigned int rc = 1;

if (nh->is_group) {
struct nh_group *nh_grp;

nh_grp = ({ typeof(*(nh->nh_grp)) *__UNIQUE_ID_rcu482 = (typeof(*(nh->nh_grp)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_483(void) ; if (!((sizeof((nh->nh_grp)) == sizeof(char) || sizeof((nh->nh_grp)) == sizeof(short) || sizeof((nh->nh_grp)) == sizeof(int) || sizeof((nh->nh_grp)) == sizeof(long)) || sizeof((nh->nh_grp)) == sizeof(long long))) __compiletime_assert_483(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_grp)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_grp)))) *)&((nh->nh_grp))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))); ; ((typeof(*(nh->nh_grp)) *)(__UNIQUE_ID_rcu482)); });
if (nh_grp->is_multipath)
rc = nh_grp->num_nh;
}

return rc;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct nexthop *nexthop_mpath_select(const struct nh_group *nhg, int nhsel)
{



if (nhsel >= nhg->num_nh)
return ((void *)0);

return nhg->nh_entries[nhsel].nh;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
u8 rt_family)
{
struct nh_group *nhg = ({ do { } while (0 && (!((lockdep_rtnl_is_held())))); ; ((typeof(*(nh->nh_grp)) *)((nh->nh_grp))); });
int i;

for (i = 0; i < nhg->num_nh; i++) {
struct nexthop *nhe = nhg->nh_entries[i].nh;
struct nh_info *nhi = ({ typeof(*(nhe->nh_info)) *__UNIQUE_ID_rcu484 = (typeof(*(nhe->nh_info)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_485(void) ; if (!((sizeof((nhe->nh_info)) == sizeof(char) || sizeof((nhe->nh_info)) == sizeof(short) || sizeof((nhe->nh_info)) == sizeof(int) || sizeof((nhe->nh_info)) == sizeof(long)) || sizeof((nhe->nh_info)) == sizeof(long long))) __compiletime_assert_485(); } while (0); (*(const volatile typeof( _Generic(((nhe->nh_info)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nhe->nh_info)))) *)&((nhe->nh_info))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))); ; ((typeof(*(nhe->nh_info)) *)(__UNIQUE_ID_rcu484)); });
struct fib_nh_common *nhc = &nhi->fib_nhc;
int weight = nhg->nh_entries[i].weight;

if (fib_add_nexthop(skb, nhc, weight, rt_family, 0) < 0)
return -90;
}

return 0;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool nexthop_is_blackhole(const struct nexthop *nh)
{
const struct nh_info *nhi;

if (nh->is_group) {
struct nh_group *nh_grp;

nh_grp = ({ typeof(*(nh->nh_grp)) *__UNIQUE_ID_rcu486 = (typeof(*(nh->nh_grp)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_487(void) ; if (!((sizeof((nh->nh_grp)) == sizeof(char) || sizeof((nh->nh_grp)) == sizeof(short) || sizeof((nh->nh_grp)) == sizeof(int) || sizeof((nh->nh_grp)) == sizeof(long)) || sizeof((nh->nh_grp)) == sizeof(long long))) __compiletime_assert_487(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_grp)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_grp)))) *)&((nh->nh_grp))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))); ; ((typeof(*(nh->nh_grp)) *)(__UNIQUE_ID_rcu486)); });
if (nh_grp->num_nh > 1)
return false;

nh = nh_grp->nh_entries[0].nh;
}

nhi = ({ typeof(*(nh->nh_info)) *__UNIQUE_ID_rcu488 = (typeof(*(nh->nh_info)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_489(void) ; if (!((sizeof((nh->nh_info)) == sizeof(char) || sizeof((nh->nh_info)) == sizeof(short) || sizeof((nh->nh_info)) == sizeof(int) || sizeof((nh->nh_info)) == sizeof(long)) || sizeof((nh->nh_info)) == sizeof(long long))) __compiletime_assert_489(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_info)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_info)))) *)&((nh->nh_info))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))); ; ((typeof(*(nh->nh_info)) *)(__UNIQUE_ID_rcu488)); });
return nhi->reject_nh;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void nexthop_path_fib_result(struct fib_result *res, int hash)
{
struct nh_info *nhi;
struct nexthop *nh;

nh = nexthop_select_path(res->fi->nh, hash);
nhi = ({ typeof(*(nh->nh_info)) *__UNIQUE_ID_rcu490 = (typeof(*(nh->nh_info)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_491(void) ; if (!((sizeof((nh->nh_info)) == sizeof(char) || sizeof((nh->nh_info)) == sizeof(short) || sizeof((nh->nh_info)) == sizeof(int) || sizeof((nh->nh_info)) == sizeof(long)) || sizeof((nh->nh_info)) == sizeof(long long))) __compiletime_assert_491(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_info)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_info)))) *)&((nh->nh_info))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(nh->nh_info)) *)(__UNIQUE_ID_rcu490)); });
res->nhc = &nhi->fib_nhc;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct fib_nh_common *nexthop_fib_nhc(struct nexthop *nh, int nhsel)
{
struct nh_info *nhi;
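/* the two compiletime_assert blocks below expand from BUILD_BUG_ON
 * checks that nh_common is the first member of struct fib_nh and
 * struct fib6_nh */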

do { __attribute__((__noreturn__)) extern void __compiletime_assert_492(void) ; if (!(!(__builtin_offsetof(struct fib_nh, nh_common) != 0))) __compiletime_assert_492(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_493(void) ; if (!(!(__builtin_offsetof(struct fib6_nh, nh_common) != 0))) __compiletime_assert_493(); } while (0);

if (nh->is_group) {
struct nh_group *nh_grp;

nh_grp = ({ typeof(*(nh->nh_grp)) *__UNIQUE_ID_rcu494 = (typeof(*(nh->nh_grp)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_495(void) ; if (!((sizeof((nh->nh_grp)) == sizeof(char) || sizeof((nh->nh_grp)) == sizeof(short) || sizeof((nh->nh_grp)) == sizeof(int) || sizeof((nh->nh_grp)) == sizeof(long)) || sizeof((nh->nh_grp)) == sizeof(long long))) __compiletime_assert_495(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_grp)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_grp)))) *)&((nh->nh_grp))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))); ; ((typeof(*(nh->nh_grp)) *)(__UNIQUE_ID_rcu494)); });
if (nh_grp->is_multipath) {
nh = nexthop_mpath_select(nh_grp, nhsel);
if (!nh)
return ((void *)0);
}
}

nhi = ({ typeof(*(nh->nh_info)) *__UNIQUE_ID_rcu496 = (typeof(*(nh->nh_info)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_497(void) ; if (!((sizeof((nh->nh_info)) == sizeof(char) || sizeof((nh->nh_info)) == sizeof(short) || sizeof((nh->nh_info)) == sizeof(int) || sizeof((nh->nh_info)) == sizeof(long)) || sizeof((nh->nh_info)) == sizeof(long long))) __compiletime_assert_497(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_info)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_info)))) *)&((nh->nh_info))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))); ; ((typeof(*(nh->nh_info)) *)(__UNIQUE_ID_rcu496)); });
return &nhi->fib_nhc;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct fib_nh_common *nexthop_get_nhc_lookup(const struct nexthop *nh,
int fib_flags,
const struct flowi4 *flp,
int *nhsel)
{
struct nh_info *nhi;

if (nh->is_group) {
struct nh_group *nhg = ({ typeof(*(nh->nh_grp)) *__UNIQUE_ID_rcu498 = (typeof(*(nh->nh_grp)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_499(void) ; if (!((sizeof((nh->nh_grp)) == sizeof(char) || sizeof((nh->nh_grp)) == sizeof(short) || sizeof((nh->nh_grp)) == sizeof(int) || sizeof((nh->nh_grp)) == sizeof(long)) || sizeof((nh->nh_grp)) == sizeof(long long))) __compiletime_assert_499(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_grp)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_grp)))) *)&((nh->nh_grp))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(nh->nh_grp)) *)(__UNIQUE_ID_rcu498)); });
int i;

for (i = 0; i < nhg->num_nh; i++) {
struct nexthop *nhe = nhg->nh_entries[i].nh;

nhi = ({ typeof(*(nhe->nh_info)) *__UNIQUE_ID_rcu500 = (typeof(*(nhe->nh_info)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_501(void) ; if (!((sizeof((nhe->nh_info)) == sizeof(char) || sizeof((nhe->nh_info)) == sizeof(short) || sizeof((nhe->nh_info)) == sizeof(int) || sizeof((nhe->nh_info)) == sizeof(long)) || sizeof((nhe->nh_info)) == sizeof(long long))) __compiletime_assert_501(); } while (0); (*(const volatile typeof( _Generic(((nhe->nh_info)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nhe->nh_info)))) *)&((nhe->nh_info))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(nhe->nh_info)) *)(__UNIQUE_ID_rcu500)); });
if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) {
*nhsel = i;
return &nhi->fib_nhc;
}
}
} else {
nhi = ({ typeof(*(nh->nh_info)) *__UNIQUE_ID_rcu502 = (typeof(*(nh->nh_info)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_503(void) ; if (!((sizeof((nh->nh_info)) == sizeof(char) || sizeof((nh->nh_info)) == sizeof(short) || sizeof((nh->nh_info)) == sizeof(int) || sizeof((nh->nh_info)) == sizeof(long)) || sizeof((nh->nh_info)) == sizeof(long long))) __compiletime_assert_503(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_info)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_info)))) *)&((nh->nh_info))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(nh->nh_info)) *)(__UNIQUE_ID_rcu502)); });
if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) {
*nhsel = 0;
return &nhi->fib_nhc;
}
}

return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool nexthop_uses_dev(const struct nexthop *nh,
const struct net_device *dev)
{
struct nh_info *nhi;

if (nh->is_group) {
struct nh_group *nhg = ({ typeof(*(nh->nh_grp)) *__UNIQUE_ID_rcu504 = (typeof(*(nh->nh_grp)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_505(void) ; if (!((sizeof((nh->nh_grp)) == sizeof(char) || sizeof((nh->nh_grp)) == sizeof(short) || sizeof((nh->nh_grp)) == sizeof(int) || sizeof((nh->nh_grp)) == sizeof(long)) || sizeof((nh->nh_grp)) == sizeof(long long))) __compiletime_assert_505(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_grp)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_grp)))) *)&((nh->nh_grp))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(nh->nh_grp)) *)(__UNIQUE_ID_rcu504)); });
int i;

for (i = 0; i < nhg->num_nh; i++) {
struct nexthop *nhe = nhg->nh_entries[i].nh;

nhi = ({ typeof(*(nhe->nh_info)) *__UNIQUE_ID_rcu506 = (typeof(*(nhe->nh_info)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_507(void) ; if (!((sizeof((nhe->nh_info)) == sizeof(char) || sizeof((nhe->nh_info)) == sizeof(short) || sizeof((nhe->nh_info)) == sizeof(int) || sizeof((nhe->nh_info)) == sizeof(long)) || sizeof((nhe->nh_info)) == sizeof(long long))) __compiletime_assert_507(); } while (0); (*(const volatile typeof( _Generic(((nhe->nh_info)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nhe->nh_info)))) *)&((nhe->nh_info))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(nhe->nh_info)) *)(__UNIQUE_ID_rcu506)); });
if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev))
return true;
}
} else {
nhi = ({ typeof(*(nh->nh_info)) *__UNIQUE_ID_rcu508 = (typeof(*(nh->nh_info)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_509(void) ; if (!((sizeof((nh->nh_info)) == sizeof(char) || sizeof((nh->nh_info)) == sizeof(short) || sizeof((nh->nh_info)) == sizeof(int) || sizeof((nh->nh_info)) == sizeof(long)) || sizeof((nh->nh_info)) == sizeof(long long))) __compiletime_assert_509(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_info)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_info)))) *)&((nh->nh_info))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(nh->nh_info)) *)(__UNIQUE_ID_rcu508)); });
if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev))
return true;
}

return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int fib_info_num_path(const struct fib_info *fi)
{
if (__builtin_expect(!!(fi->nh), 0))
return nexthop_num_path(fi->nh);

return fi->fib_nhs;
}

int fib_check_nexthop(struct nexthop *nh, u8 scope,
struct netlink_ext_ack *extack);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct fib_nh_common *fib_info_nhc(struct fib_info *fi, int nhsel)
{
if (__builtin_expect(!!(fi->nh), 0))
return nexthop_fib_nhc(fi->nh, nhsel);

return &fi->fib_nh[nhsel].nh_common;
}
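/* in fib_info_nh() below, the inline-asm/__bug_table block is the RISC-V
 * expansion of WARN_ON(fi->nh): an ebreak trap plus a bug-table entry
 * recording "include/net/nexthop.h" line 468 */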


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct fib_nh *fib_info_nh(struct fib_info *fi, int nhsel)
{
({ int __ret_warn_on = !!(fi->nh); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/nexthop.h"), "i" (468), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });

return &fi->fib_nh[nhsel];
}




int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
struct netlink_ext_ack *extack);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
{
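/* nhi->family == 10 below is the expanded AF_INET6 */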
struct nh_info *nhi;

if (nh->is_group) {
struct nh_group *nh_grp;

nh_grp = ({ typeof(*(nh->nh_grp)) *__UNIQUE_ID_rcu510 = (typeof(*(nh->nh_grp)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_511(void) ; if (!((sizeof((nh->nh_grp)) == sizeof(char) || sizeof((nh->nh_grp)) == sizeof(short) || sizeof((nh->nh_grp)) == sizeof(int) || sizeof((nh->nh_grp)) == sizeof(long)) || sizeof((nh->nh_grp)) == sizeof(long long))) __compiletime_assert_511(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_grp)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_grp)))) *)&((nh->nh_grp))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))); ; ((typeof(*(nh->nh_grp)) *)(__UNIQUE_ID_rcu510)); });
nh = nexthop_mpath_select(nh_grp, 0);
if (!nh)
return ((void *)0);
}

nhi = ({ typeof(*(nh->nh_info)) *__UNIQUE_ID_rcu512 = (typeof(*(nh->nh_info)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_513(void) ; if (!((sizeof((nh->nh_info)) == sizeof(char) || sizeof((nh->nh_info)) == sizeof(short) || sizeof((nh->nh_info)) == sizeof(int) || sizeof((nh->nh_info)) == sizeof(long)) || sizeof((nh->nh_info)) == sizeof(long long))) __compiletime_assert_513(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_info)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_info)))) *)&((nh->nh_info))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))); ; ((typeof(*(nh->nh_info)) *)(__UNIQUE_ID_rcu512)); });
if (nhi->family == 10)
return &nhi->fib6_nh;

return ((void *)0);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct fib6_nh *nexthop_fib6_nh_bh(struct nexthop *nh)
{
struct nh_info *nhi;

if (nh->is_group) {
struct nh_group *nh_grp;

nh_grp = ({ typeof(*(nh->nh_grp)) *__UNIQUE_ID_rcu514 = (typeof(*(nh->nh_grp)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_515(void) ; if (!((sizeof((nh->nh_grp)) == sizeof(char) || sizeof((nh->nh_grp)) == sizeof(short) || sizeof((nh->nh_grp)) == sizeof(int) || sizeof((nh->nh_grp)) == sizeof(long)) || sizeof((nh->nh_grp)) == sizeof(long long))) __compiletime_assert_515(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_grp)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_grp)))) *)&((nh->nh_grp))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_bh_held()))); ; ((typeof(*(nh->nh_grp)) *)(__UNIQUE_ID_rcu514)); });
nh = nexthop_mpath_select(nh_grp, 0);
if (!nh)
return ((void *)0);
}

nhi = ({ typeof(*(nh->nh_info)) *__UNIQUE_ID_rcu516 = (typeof(*(nh->nh_info)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_517(void) ; if (!((sizeof((nh->nh_info)) == sizeof(char) || sizeof((nh->nh_info)) == sizeof(short) || sizeof((nh->nh_info)) == sizeof(int) || sizeof((nh->nh_info)) == sizeof(long)) || sizeof((nh->nh_info)) == sizeof(long long))) __compiletime_assert_517(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_info)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_info)))) *)&((nh->nh_info))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_bh_held()))); ; ((typeof(*(nh->nh_info)) *)(__UNIQUE_ID_rcu516)); });
if (nhi->family == 10)
return &nhi->fib6_nh;

return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net_device *fib6_info_nh_dev(struct fib6_info *f6i)
{
struct fib6_nh *fib6_nh;

fib6_nh = f6i->nh ? nexthop_fib6_nh(f6i->nh) : f6i->fib6_nh;
return fib6_nh->nh_common.nhc_dev;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void nexthop_path_fib6_result(struct fib6_result *res, int hash)
{
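/* the 0x0200 below is the expanded RTF_REJECT route flag */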
struct nexthop *nh = res->f6i->nh;
struct nh_info *nhi;

nh = nexthop_select_path(nh, hash);

nhi = ({ typeof(*(nh->nh_info)) *__UNIQUE_ID_rcu518 = (typeof(*(nh->nh_info)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_519(void) ; if (!((sizeof((nh->nh_info)) == sizeof(char) || sizeof((nh->nh_info)) == sizeof(short) || sizeof((nh->nh_info)) == sizeof(int) || sizeof((nh->nh_info)) == sizeof(long)) || sizeof((nh->nh_info)) == sizeof(long long))) __compiletime_assert_519(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_info)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_info)))) *)&((nh->nh_info))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))); ; ((typeof(*(nh->nh_info)) *)(__UNIQUE_ID_rcu518)); });
if (nhi->reject_nh) {
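/* 0x0200 is RTF_REJECT (include/uapi/linux/ipv6_route.h). */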
res->fib6_type = RTN_BLACKHOLE;
res->fib6_flags |= 0x0200;
res->nh = nexthop_fib6_nh(nh);
} else {
res->nh = &nhi->fib6_nh;
}
}

int nexthop_for_each_fib6_nh(struct nexthop *nh,
int (*cb)(struct fib6_nh *nh, void *arg),
void *arg);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int nexthop_get_family(struct nexthop *nh)
{
struct nh_info *nhi = ({ typeof(*(nh->nh_info)) *__UNIQUE_ID_rcu520 = (typeof(*(nh->nh_info)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_521(void) ; if (!((sizeof((nh->nh_info)) == sizeof(char) || sizeof((nh->nh_info)) == sizeof(short) || sizeof((nh->nh_info)) == sizeof(int) || sizeof((nh->nh_info)) == sizeof(long)) || sizeof((nh->nh_info)) == sizeof(long long))) __compiletime_assert_521(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_info)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_info)))) *)&((nh->nh_info))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))); ; ((typeof(*(nh->nh_info)) *)(__UNIQUE_ID_rcu520)); });

return nhi->family;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct fib_nh_common *nexthop_fdb_nhc(struct nexthop *nh)
{
struct nh_info *nhi = ({ typeof(*(nh->nh_info)) *__UNIQUE_ID_rcu522 = (typeof(*(nh->nh_info)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_523(void) ; if (!((sizeof((nh->nh_info)) == sizeof(char) || sizeof((nh->nh_info)) == sizeof(short) || sizeof((nh->nh_info)) == sizeof(int) || sizeof((nh->nh_info)) == sizeof(long)) || sizeof((nh->nh_info)) == sizeof(long long))) __compiletime_assert_523(); } while (0); (*(const volatile typeof( _Generic(((nh->nh_info)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nh->nh_info)))) *)&((nh->nh_info))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))); ; ((typeof(*(nh->nh_info)) *)(__UNIQUE_ID_rcu522)); });

return &nhi->fib_nhc;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct fib_nh_common *nexthop_path_fdb_result(struct nexthop *nh,
int hash)
{
struct nh_info *nhi;
struct nexthop *nhp;

nhp = nexthop_select_path(nh, hash);
if (__builtin_expect(!!(!nhp), 0))
return ((void *)0);
nhi = ({ typeof(*(nhp->nh_info)) *__UNIQUE_ID_rcu524 = (typeof(*(nhp->nh_info)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_525(void) ; if (!((sizeof((nhp->nh_info)) == sizeof(char) || sizeof((nhp->nh_info)) == sizeof(short) || sizeof((nhp->nh_info)) == sizeof(int) || sizeof((nhp->nh_info)) == sizeof(long)) || sizeof((nhp->nh_info)) == sizeof(long long))) __compiletime_assert_525(); } while (0); (*(const volatile typeof( _Generic(((nhp->nh_info)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nhp->nh_info)))) *)&((nhp->nh_info))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(nhp->nh_info)) *)(__UNIQUE_ID_rcu524)); });
return &nhi->fib_nhc;
}
# 31 "./include/net/ip6_route.h" 2
# 54 "./include/net/ip6_route.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int rt6_srcprefs2flags(unsigned int srcprefs)
{
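/*
 * Presumably maps the IPV6_PREFER_SRC_* bits onto the matching
 * RT6_LOOKUP_F_SRCPREF_* lookup flags, which sit three bits higher;
 * rt6_flags2srcprefs() below undoes the shift.
 */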

return srcprefs << 3;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int rt6_flags2srcprefs(int flags)
{
return (flags >> 3) & 7;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool rt6_need_strict(const struct in6_addr *daddr)
{
return ipv6_addr_type(daddr) &
(0x0002U | 0x0020U | 0x0010U);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool rt6_qualify_for_ecmp(const struct fib6_info *f6i)
{
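/*
 * 0x00040000 is RTF_ADDRCONF: routes learned from router advertisements,
 * routes with a nexthop object, and gateway-less routes do not qualify
 * for ECMP here.
 */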

return !(f6i->fib6_flags & 0x00040000) && !f6i->nh &&
f6i->fib6_nh->nh_common.nhc_gw_family;
}

void ip6_route_input(struct sk_buff *skb);
struct dst_entry *ip6_route_input_lookup(struct net *net,
struct net_device *dev,
struct flowi6 *fl6,
const struct sk_buff *skb, int flags);

struct dst_entry *ip6_route_output_flags_noref(struct net *net,
const struct sock *sk,
struct flowi6 *fl6, int flags);

struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
struct flowi6 *fl6, int flags);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct dst_entry *ip6_route_output(struct net *net,
const struct sock *sk,
struct flowi6 *fl6)
{
return ip6_route_output_flags(net, sk, fl6, 0);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip6_rt_put_flags(struct rt6_info *rt, int flags)
{
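/* 0x00000080 appears to be RT6_LOOKUP_F_DST_NOREF. */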
if (!(flags & 0x00000080) ||
!list_empty(&rt->rt6i_uncached))
ip6_rt_put(rt);
}

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
const struct sk_buff *skb, int flags);
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
int ifindex, struct flowi6 *fl6,
const struct sk_buff *skb, int flags);

void ip6_route_init_special_entries(void);
int ip6_route_init(void);
void ip6_route_cleanup(void);

int ipv6_route_ioctl(struct net *net, unsigned int cmd,
struct in6_rtmsg *rtmsg);

int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
int ip6_ins_rt(struct net *net, struct fib6_info *f6i);
int ip6_del_rt(struct net *net, struct fib6_info *f6i, bool skip_notify);

void rt6_flush_exceptions(struct fib6_info *f6i);
void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args,
unsigned long now);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ip6_route_get_saddr(struct net *net, struct fib6_info *f6i,
const struct in6_addr *daddr,
unsigned int prefs,
struct in6_addr *saddr)
{
int err = 0;

if (f6i && f6i->fib6_prefsrc.plen) {
*saddr = f6i->fib6_prefsrc.addr;
} else {
struct net_device *dev = f6i ? fib6_info_nh_dev(f6i) : ((void *)0);

err = ipv6_dev_get_saddr(net, dev, daddr, prefs, saddr);
}

return err;
}

struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
const struct in6_addr *saddr, int oif,
const struct sk_buff *skb, int flags);
u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
const struct sk_buff *skb, struct flow_keys *hkeys);

struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct flowi6 *fl6);

void fib6_force_start_gc(struct net *net);

struct fib6_info *addrconf_f6i_alloc(struct net *net, struct inet6_dev *idev,
const struct in6_addr *addr, bool anycast,
gfp_t gfp_flags);

struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
int flags);





struct fib6_info *rt6_get_dflt_router(struct net *net,
const struct in6_addr *addr,
struct net_device *dev);
struct fib6_info *rt6_add_dflt_router(struct net *net,
const struct in6_addr *gwaddr,
struct net_device *dev, unsigned int pref,
u32 defrtr_usr_metric);

void rt6_purge_dflt_routers(struct net *net);

int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
const struct in6_addr *gwaddr);

void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif,
u32 mark, kuid_t uid);
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
kuid_t uid);
void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif);
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);

struct netlink_callback;

struct rt6_rtnl_dump_arg {
struct sk_buff *skb;
struct netlink_callback *cb;
struct net *net;
struct fib_dump_filter filter;
};

int rt6_dump_route(struct fib6_info *f6i, void *p_arg, unsigned int skip);
void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
void rt6_sync_up(struct net_device *dev, unsigned char nh_flags);
void rt6_disable_ip(struct net_device *dev, unsigned long event);
void rt6_sync_down_dev(struct net_device *dev, unsigned long event);
void rt6_multipath_rebalance(struct fib6_info *f6i);

void rt6_uncached_list_add(struct rt6_info *rt);
void rt6_uncached_list_del(struct rt6_info *rt);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
{
const struct dst_entry *dst = skb_dst(skb);
const struct rt6_info *rt6 = ((void *)0);
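/*
 * The statement expression below is the expansion of
 * container_of(dst, struct rt6_info, dst), including its
 * _Static_assert() pointer-type check.
 */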

if (dst)
rt6 = ({ void *__mptr = (void *)(dst);
_Static_assert(__builtin_types_compatible_p(typeof(*(dst)), typeof(((struct rt6_info *)0)->dst)) ||
__builtin_types_compatible_p(typeof(*(dst)), typeof(void)),
"pointer type mismatch in container_of()");
((struct rt6_info *)(__mptr - __builtin_offsetof(struct rt6_info, dst))); });

return rt6;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
const struct in6_addr *daddr,
const struct in6_addr *saddr)
{
struct ipv6_pinfo *np = inet6_sk(sk);

np->dst_cookie = rt6_get_cookie((struct rt6_info *)dst);
sk_setup_caps(sk, dst);
np->daddr_cache = daddr;



}

void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
const struct flowi6 *fl6);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_unicast_destination(const struct sk_buff *skb)
{
struct rt6_info *rt = (struct rt6_info *) skb_dst(skb);
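/* 0x80000000 is RTF_LOCAL; ipv6_anycast_destination() below tests
 * RTF_ANYCAST (0x00100000) and RTF_GATEWAY|RTF_NONEXTHOP (0x0002|0x00200000). */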

return rt->rt6i_flags & 0x80000000;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv6_anycast_destination(const struct dst_entry *dst,
const struct in6_addr *daddr)
{
struct rt6_info *rt = (struct rt6_info *)dst;

return rt->rt6i_flags & 0x00100000 ||
(rt->rt6i_dst.plen < 127 &&
!(rt->rt6i_flags & (0x0002 | 0x00200000)) &&
ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
}

int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *));

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb)
{
const struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
inet6_sk(skb->sk) : ((void *)0);
const struct dst_entry *dst = skb_dst(skb);
unsigned int mtu;
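/*
 * np->pmtudisc holds the IPV6_PMTUDISC_* values (DONT=0, WANT=1, DO=2,
 * PROBE=3, INTERFACE=4, OMIT=5); `>= 3` below means PROBE, and
 * ip6_sk_accept_pmtu()/ip6_sk_ignore_df() further down compare against
 * the same numbers. The blob in the branch is READ_ONCE(dst->dev->mtu).
 */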

if (np && np->pmtudisc >= 3) {
mtu = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_526(void) ; if (!((sizeof(dst->dev->mtu) == sizeof(char) || sizeof(dst->dev->mtu) == sizeof(short) || sizeof(dst->dev->mtu) == sizeof(int) || sizeof(dst->dev->mtu) == sizeof(long)) || sizeof(dst->dev->mtu) == sizeof(long long))) __compiletime_assert_526(); } while (0); (*(const volatile typeof( _Generic((dst->dev->mtu), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (dst->dev->mtu))) *)&(dst->dev->mtu)); });
mtu -= lwtunnel_headroom(dst->lwtstate, mtu);
} else {
mtu = dst_mtu(dst);
}
return mtu;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ip6_sk_accept_pmtu(const struct sock *sk)
{
return inet6_sk(sk)->pmtudisc != 4 &&
inet6_sk(sk)->pmtudisc != 5;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ip6_sk_ignore_df(const struct sock *sk)
{
return inet6_sk(sk)->pmtudisc < 2 ||
inet6_sk(sk)->pmtudisc == 5;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const struct in6_addr *rt6_nexthop(const struct rt6_info *rt,
const struct in6_addr *daddr)
{
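/* 0x0002 is RTF_GATEWAY; 0x01000000 is RTF_CACHE. */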
if (rt->rt6i_flags & 0x0002)
return &rt->rt6i_gateway;
else if (__builtin_expect(!!(rt->rt6i_flags & 0x01000000), 0))
return &rt->rt6i_dst.addr;
else
return daddr;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool rt6_duplicate_nexthop(struct fib6_info *a, struct fib6_info *b)
{
struct fib6_nh *nha, *nhb;

if (a->nh || b->nh)
return nexthop_cmp(a->nh, b->nh);

nha = a->fib6_nh;
nhb = b->fib6_nh;
return nha->nh_common.nhc_dev == nhb->nh_common.nhc_dev &&
ipv6_addr_equal(&nha->nh_common.nhc_gw.ipv6, &nhb->nh_common.nhc_gw.ipv6) &&
!lwtunnel_cmp_encap(nha->nh_common.nhc_lwtstate, nhb->nh_common.nhc_lwtstate);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int ip6_dst_mtu_maybe_forward(const struct dst_entry *dst,
bool forwarding)
{
struct inet6_dev *idev;
unsigned int mtu;

if (!forwarding || dst_metric_locked(dst, RTAX_MTU)) {
mtu = dst_metric_raw(dst, RTAX_MTU);
if (mtu)
goto out;
}
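/* 1280 is IPV6_MIN_MTU, the fallback when no inet6_dev is attached. */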

mtu = 1280;
rcu_read_lock();
idev = __in6_dev_get(dst->dev);
if (idev)
mtu = idev->cnf.mtu6;
rcu_read_unlock();

out:
return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

u32 ip6_mtu_from_fib6(const struct fib6_result *res,
const struct in6_addr *daddr,
const struct in6_addr *saddr);

struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
struct net_device *dev, struct sk_buff *skb,
const void *daddr);
# 50 "net/ipv6/route.c" 2


# 1 "./include/net/tcp.h" 1
# 32 "./include/net/tcp.h"
# 1 "./include/net/inet_hashtables.h" 1
# 38 "./include/net/inet_hashtables.h"
struct inet_ehash_bucket {
struct hlist_nulls_head chain;
};
# 76 "./include/net/inet_hashtables.h"
struct inet_bind_bucket {
possible_net_t ib_net;
int l3mdev;
unsigned short port;
signed char fastreuse;
signed char fastreuseport;
kuid_t fastuid;

struct in6_addr fast_v6_rcv_saddr;

__be32 fast_rcv_saddr;
unsigned short fast_sk_family;
bool fast_ipv6_only;
struct hlist_node node;
struct hlist_head owners;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net *ib_net(struct inet_bind_bucket *ib)
{
return read_pnet(&ib->ib_net);
}




struct inet_bind_hashbucket {
spinlock_t lock;
struct hlist_head chain;
};







struct inet_listen_hashbucket {
spinlock_t lock;
unsigned int count;
union {
struct hlist_head head;
struct hlist_nulls_head nulls_head;
};
};




struct inet_hashinfo {






struct inet_ehash_bucket *ehash;
spinlock_t *ehash_locks;
unsigned int ehash_mask;
unsigned int ehash_locks_mask;




struct kmem_cache *bind_bucket_cachep;
struct inet_bind_hashbucket *bhash;
unsigned int bhash_size;


unsigned int lhash2_mask;
struct inet_listen_hashbucket *lhash2;
# 159 "./include/net/inet_hashtables.h"
struct inet_listen_hashbucket listening_hash[32]
__attribute__((__aligned__((1 << 6))));
};
# 172 "./include/net/inet_hashtables.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct inet_listen_hashbucket *
inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
{
return &h->lhash2[hash & h->lhash2_mask];
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct inet_ehash_bucket *inet_ehash_bucket(
struct inet_hashinfo *hashinfo,
unsigned int hash)
{
return &hashinfo->ehash[hash & hashinfo->ehash_mask];
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) spinlock_t *inet_ehash_lockp(
struct inet_hashinfo *hashinfo,
unsigned int hash)
{
return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}

int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inet_hashinfo2_free_mod(struct inet_hashinfo *h)
{
kfree(h->lhash2);
h->lhash2 = ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
kvfree(hashinfo->ehash_locks);
hashinfo->ehash_locks = ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
int dif, int sdif)
{




return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);

}

struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
struct inet_bind_hashbucket *head,
const unsigned short snum, int l3mdev);
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
struct inet_bind_bucket *tb);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 inet_bhashfn(const struct net *net, const __u16 lport,
const u32 bhash_size)
{
return (lport + net_hash_mix(net)) & (bhash_size - 1);
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
const unsigned short snum);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 inet_lhashfn(const struct net *net, const unsigned short num)
{
return (num + net_hash_mix(net)) & (32 - 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int inet_sk_listen_hashfn(const struct sock *sk)
{
return inet_lhashfn(sock_net(sk), inet_sk(sk)->sk.__sk_common.skc_num);
}


int __inet_inherit_port(const struct sock *sk, struct sock *child);

void inet_put_port(struct sock *sk);

void inet_hashinfo_init(struct inet_hashinfo *h);
void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
unsigned long numentries, int scale,
unsigned long low_limit,
unsigned long high_limit);
int inet_hashinfo2_init_mod(struct inet_hashinfo *h);

bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk,
bool *found_dup_sk);
int __inet_hash(struct sock *sk, struct sock *osk);
int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);

struct sock *__inet_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const __be32 saddr, const __be16 sport,
const __be32 daddr,
const unsigned short hnum,
const int dif, const int sdif);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sock *inet_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif, int sdif)
{
return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
daddr, (__builtin_constant_p((__u16)(( __u16)(__be16)(dport))) ? ((__u16)( (((__u16)(( __u16)(__be16)(dport)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(dport)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(dport))), dif, sdif);
}
# 332 "./include/net/inet_hashtables.h"
struct sock *__inet_lookup_established(struct net *net,
struct inet_hashinfo *hashinfo,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const u16 hnum,
const int dif, const int sdif);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sock *
inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const __be16 dport,
const int dif)
{
return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
(__builtin_constant_p((__u16)(( __u16)(__be16)(dport))) ? ((__u16)( (((__u16)(( __u16)(__be16)(dport)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(dport)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(dport))), dif, 0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sock *__inet_lookup(struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const __be16 dport,
const int dif, const int sdif,
bool *refcounted)
{
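/*
 * The __builtin_constant_p()/__fswab16() ternary below, repeated in the
 * neighbouring lookup helpers, is the expansion of ntohs(dport): a
 * constant-folded byte swap when the port is a compile-time constant.
 */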
u16 hnum = (__builtin_constant_p((__u16)(( __u16)(__be16)(dport))) ? ((__u16)( (((__u16)(( __u16)(__be16)(dport)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(dport)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(dport)));
struct sock *sk;

sk = __inet_lookup_established(net, hashinfo, saddr, sport,
daddr, hnum, dif, sdif);
*refcounted = true;
if (sk)
return sk;
*refcounted = false;
return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
sport, daddr, hnum, dif, sdif);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sock *inet_lookup(struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const __be16 dport,
const int dif)
{
struct sock *sk;
bool refcounted;

sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
dport, dif, 0, &refcounted);

if (sk && !refcounted && !refcount_inc_not_zero(&sk->__sk_common.skc_refcnt))
sk = ((void *)0);
return sk;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
struct sk_buff *skb,
int doff,
const __be16 sport,
const __be16 dport,
const int sdif,
bool *refcounted)
{
struct sock *sk = skb_steal_sock(skb, refcounted);
const struct iphdr *iph = ip_hdr(skb);

if (sk)
return sk;

return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
doff, iph->saddr, sport,
iph->daddr, dport, inet_iif(skb), sdif,
refcounted);
}

u32 inet6_ehashfn(const struct net *net,
const struct in6_addr *laddr, const u16 lport,
const struct in6_addr *faddr, const __be16 fport);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_daddr_set(struct sock *sk, __be32 addr)
{
sk->__sk_common.skc_daddr = addr;

ipv6_addr_set_v4mapped(addr, &sk->__sk_common.skc_v6_daddr);

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
{
sk->__sk_common.skc_rcv_saddr = addr;

ipv6_addr_set_v4mapped(addr, &sk->__sk_common.skc_v6_rcv_saddr);

}

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk, u64 port_offset,
int (*check_established)(struct inet_timewait_death_row *,
struct sock *, __u16,
struct inet_timewait_sock **));

int inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk);
# 33 "./include/net/tcp.h" 2


# 1 "./include/net/sock_reuseport.h" 1




# 1 "./include/linux/filter.h" 1








# 1 "./include/linux/bpf.h" 1
# 20 "./include/linux/bpf.h"
# 1 "./include/linux/kallsyms.h" 1
# 24 "./include/linux/kallsyms.h"
struct cred;
struct module;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int is_kernel_text(unsigned long addr)
{
if (__is_kernel_text(addr))
return 1;
return in_gate_area_no_mm(addr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int is_kernel(unsigned long addr)
{
if (__is_kernel(addr))
return 1;
return in_gate_area_no_mm(addr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int is_ksym_addr(unsigned long addr)
{
if (1)
return is_kernel(addr);

return is_kernel_text(addr) || is_kernel_inittext(addr);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *dereference_symbol_descriptor(void *ptr)
{
# 65 "./include/linux/kallsyms.h"
return ptr;
}

int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
unsigned long),
void *data);



unsigned long kallsyms_lookup_name(const char *name);

extern int kallsyms_lookup_size_offset(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset);


const char *kallsyms_lookup(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset,
char **modname, char *namebuf);


extern int sprint_symbol(char *buffer, unsigned long address);
extern int sprint_symbol_build_id(char *buffer, unsigned long address);
extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
extern int sprint_backtrace(char *buffer, unsigned long address);
extern int sprint_backtrace_build_id(char *buffer, unsigned long address);

int lookup_symbol_name(unsigned long addr, char *symname);
int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);


extern bool kallsyms_show_value(const struct cred *cred);
# 168 "./include/linux/kallsyms.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void print_ip_sym(const char *loglvl, unsigned long ip)
{
({ do {} while (0); _printk("%s[<%px>] %pS\n", loglvl, (void *) ip, (void *) ip); });
}
# 21 "./include/linux/bpf.h" 2

# 1 "./include/linux/sched/mm.h" 1
# 10 "./include/linux/sched/mm.h"
# 1 "./include/linux/sync_core.h" 1
# 15 "./include/linux/sync_core.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sync_core_before_usermode(void)
{
}
# 11 "./include/linux/sched/mm.h" 2
# 1 "./include/linux/ioasid.h" 1








typedef unsigned int ioasid_t;
typedef ioasid_t (*ioasid_alloc_fn_t)(ioasid_t min, ioasid_t max, void *data);
typedef void (*ioasid_free_fn_t)(ioasid_t ioasid, void *data);

struct ioasid_set {
int dummy;
};
# 25 "./include/linux/ioasid.h"
struct ioasid_allocator_ops {
ioasid_alloc_fn_t alloc;
ioasid_free_fn_t free;
struct list_head list;
void *pdata;
};
# 49 "./include/linux/ioasid.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min,
ioasid_t max, void *private)
{
return ((ioasid_t)-1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ioasid_free(ioasid_t ioasid) { }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
bool (*getter)(void *))
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ioasid_register_allocator(struct ioasid_allocator_ops *allocator)
{
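/* -524 is -ENOTSUPP; these appear to be the stubs used when IOASID
 * support is not configured. */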
return -524;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ioasid_set_data(ioasid_t ioasid, void *data)
{
return -524;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool pasid_valid(ioasid_t ioasid)
{
return false;
}
# 12 "./include/linux/sched/mm.h" 2




extern struct mm_struct *mm_alloc(void);
# 35 "./include/linux/sched/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mmgrab(struct mm_struct *mm)
{
atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mmdrop(struct mm_struct *mm)
{





if (__builtin_expect(!!(atomic_dec_and_test(&mm->mm_count)), 0))
__mmdrop(mm);
}
# 76 "./include/linux/sched/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mmdrop_sched(struct mm_struct *mm)
{
mmdrop(mm);
}
# 98 "./include/linux/sched/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mmget(struct mm_struct *mm)
{
atomic_inc(&mm->mm_users);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mmget_not_zero(struct mm_struct *mm)
{
return atomic_inc_not_zero(&mm->mm_users);
}


extern void mmput(struct mm_struct *);




void mmput_async(struct mm_struct *);



extern struct mm_struct *get_task_mm(struct task_struct *task);





extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);

extern void exit_mm_release(struct task_struct *, struct mm_struct *);

extern void exec_mm_release(struct task_struct *, struct mm_struct *);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mm_update_next_owner(struct mm_struct *mm)
{
}
# 147 "./include/linux/sched/mm.h"
extern void arch_pick_mmap_layout(struct mm_struct *mm,
struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags);





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool in_vfork(struct task_struct *tsk)
{
bool ret;
# 180 "./include/linux/sched/mm.h"
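/*
 * The __UNIQUE_ID_rcu527 expression below is rcu_dereference(tsk->real_parent),
 * taken under the rcu_read_lock() while the parent's mm is compared.
 */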
rcu_read_lock();
ret = tsk->vfork_done &&
({ typeof(*(tsk->real_parent)) *__UNIQUE_ID_rcu527 = (typeof(*(tsk->real_parent)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_528(void) ; if (!((sizeof((tsk->real_parent)) == sizeof(char) || sizeof((tsk->real_parent)) == sizeof(short) || sizeof((tsk->real_parent)) == sizeof(int) || sizeof((tsk->real_parent)) == sizeof(long)) || sizeof((tsk->real_parent)) == sizeof(long long))) __compiletime_assert_528(); } while (0); (*(const volatile typeof( _Generic(((tsk->real_parent)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((tsk->real_parent)))) *)&((tsk->real_parent))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(tsk->real_parent)) *)(__UNIQUE_ID_rcu527)); })->mm == tsk->mm;
rcu_read_unlock();

return ret;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) gfp_t current_gfp_context(gfp_t flags)
{
unsigned int pflags = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_529(void) ; if (!((sizeof(get_current()->flags) == sizeof(char) || sizeof(get_current()->flags) == sizeof(short) || sizeof(get_current()->flags) == sizeof(int) || sizeof(get_current()->flags) == sizeof(long)) || sizeof(get_current()->flags) == sizeof(long long))) __compiletime_assert_529(); } while (0); (*(const volatile typeof( _Generic((get_current()->flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (get_current()->flags))) *)&(get_current()->flags)); });
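/*
 * The PF_* masks here are presumably PF_MEMALLOC_NOIO (0x00080000),
 * PF_MEMALLOC_NOFS (0x00040000) and PF_MEMALLOC_PIN (0x10000000); the gfp
 * bits cleared are __GFP_IO (0x40), __GFP_FS (0x80) and __GFP_MOVABLE (0x08).
 */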

if (__builtin_expect(!!(pflags & (0x00080000 | 0x00040000 | 0x10000000)), 0)) {




if (pflags & 0x00080000)
flags &= ~((( gfp_t)0x40u) | (( gfp_t)0x80u));
else if (pflags & 0x00040000)
flags &= ~(( gfp_t)0x80u);

if (pflags & 0x10000000)
flags &= ~(( gfp_t)0x08u);
}
return flags;
}


extern void __fs_reclaim_acquire(unsigned long ip);
extern void __fs_reclaim_release(unsigned long ip);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
# 233 "./include/linux/sched/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memalloc_retry_wait(gfp_t gfp_flags)
{
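/*
 * The long statement below looks like the expansion of
 * __set_current_state(TASK_UNINTERRUPTIBLE) (0x0002) plus its sleep-state
 * sanity WARN_ON(): the inline asm emits an `ebreak` and a __bug_table
 * entry, which is how WARN()/BUG() are encoded on RISC-V.
 */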




do { do { ({ int __ret_warn_on = !!((((0x0002)) & (0x0004 | 0x0008 | 0x0040 | 0x0080))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/sched/mm.h"), "i" (239), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); get_current()->task_state_change = ({ __label__ __here; __here: (unsigned long)&&__here; }); } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_530(void) ; if (!((sizeof(get_current()->__state) == sizeof(char) || sizeof(get_current()->__state) == sizeof(short) || sizeof(get_current()->__state) == sizeof(int) || sizeof(get_current()->__state) == sizeof(long)) || sizeof(get_current()->__state) == sizeof(long long))) __compiletime_assert_530(); } while (0); do { *(volatile typeof(get_current()->__state) *)&(get_current()->__state) = ((0x0002)); } while (0); } while (0); } while (0);
gfp_flags = current_gfp_context(gfp_flags);
if (gfpflags_allow_blocking(gfp_flags) &&
!(gfp_flags & (( gfp_t)0x10000u)))

io_schedule_timeout(1);
else



io_schedule_timeout(100/50);
}
# 260 "./include/linux/sched/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void might_alloc(gfp_t gfp_mask)
{
fs_reclaim_acquire(gfp_mask);
fs_reclaim_release(gfp_mask);

do { if (gfpflags_allow_blocking(gfp_mask)) do { __might_sleep("include/linux/sched/mm.h", 265); __cond_resched(); } while (0); } while (0);
}
# 279 "./include/linux/sched/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int memalloc_noio_save(void)
{
unsigned int flags = get_current()->flags & 0x00080000;
get_current()->flags |= 0x00080000;
return flags;
}
# 294 "./include/linux/sched/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memalloc_noio_restore(unsigned int flags)
{
get_current()->flags = (get_current()->flags & ~0x00080000) | flags;
}
# 310 "./include/linux/sched/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int memalloc_nofs_save(void)
{
unsigned int flags = get_current()->flags & 0x00040000;
get_current()->flags |= 0x00040000;
return flags;
}
# 325 "./include/linux/sched/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memalloc_nofs_restore(unsigned int flags)
{
get_current()->flags = (get_current()->flags & ~0x00040000) | flags;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int memalloc_noreclaim_save(void)
{
unsigned int flags = get_current()->flags & 0x00000800;
get_current()->flags |= 0x00000800;
return flags;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memalloc_noreclaim_restore(unsigned int flags)
{
get_current()->flags = (get_current()->flags & ~0x00000800) | flags;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int memalloc_pin_save(void)
{
unsigned int flags = get_current()->flags & 0x10000000;

get_current()->flags |= 0x10000000;
return flags;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void memalloc_pin_restore(unsigned int flags)
{
get_current()->flags = (get_current()->flags & ~0x10000000) | flags;
}
# 384 "./include/linux/sched/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
return ((void *)0);
}



enum {
MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
MEMBARRIER_STATE_PRIVATE_EXPEDITED = (1U << 1),
MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = (1U << 2),
MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3),
MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4),
MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5),
MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = (1U << 6),
MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = (1U << 7),
};

enum {
MEMBARRIER_FLAG_SYNC_CORE = (1U << 0),
MEMBARRIER_FLAG_RSEQ = (1U << 1),
};





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
if (get_current()->mm != mm)
return;
if (__builtin_expect(!!(!(atomic_read(&mm->membarrier_state) & MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)), 1))

return;
sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

extern void membarrier_update_current_mm(struct mm_struct *next_mm);
# 465 "./include/linux/sched/mm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mm_pasid_init(struct mm_struct *mm) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mm_pasid_set(struct mm_struct *mm, u32 pasid) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mm_pasid_drop(struct mm_struct *mm) {}
# 23 "./include/linux/bpf.h" 2


# 1 "./include/linux/bpfptr.h" 1








typedef sockptr_t bpfptr_t;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpfptr_is_kernel(bpfptr_t bpfptr)
{
return bpfptr.is_kernel;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bpfptr_t KERNEL_BPFPTR(void *p)
{
return (bpfptr_t) { .kernel = p, .is_kernel = true };
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bpfptr_t USER_BPFPTR(void *p)
{
return (bpfptr_t) { .user = p };
}
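/*
 * In make_bpfptr() below, the ({ u64 __dummy; ... }) statement expression
 * is presumably u64_to_user_ptr(addr): a compile-time check that addr is a
 * u64, followed by the cast through uintptr_t.
 */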

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bpfptr_t make_bpfptr(u64 addr, bool is_kernel)
{
if (is_kernel)
return KERNEL_BPFPTR((void*) (uintptr_t) addr);
else
return USER_BPFPTR(( { ({ u64 __dummy; typeof((addr)) __dummy2; (void)(&__dummy == &__dummy2); 1; }); (void *)(uintptr_t)(addr); } ));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpfptr_is_null(bpfptr_t bpfptr)
{
if (bpfptr_is_kernel(bpfptr))
return !bpfptr.kernel;
return !bpfptr.user;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpfptr_add(bpfptr_t *bpfptr, size_t val)
{
if (bpfptr_is_kernel(*bpfptr))
bpfptr->kernel += val;
else
bpfptr->user += val;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int copy_from_bpfptr_offset(void *dst, bpfptr_t src,
size_t offset, size_t size)
{
return copy_from_sockptr_offset(dst, (sockptr_t) src, offset, size);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int copy_from_bpfptr(void *dst, bpfptr_t src, size_t size)
{
return copy_from_bpfptr_offset(dst, src, 0, size);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset,
const void *src, size_t size)
{
return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *kvmemdup_bpfptr(bpfptr_t src, size_t len)
{
void *p = kvmalloc(len, ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u) | (( gfp_t)0x100000u)) | (( gfp_t)0x2000u));
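/* The gfp mask above decodes to GFP_USER | __GFP_NOWARN; the ERR_PTR()
 * values -12 and -14 below are -ENOMEM and -EFAULT. */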

if (!p)
return ERR_PTR(-12);
if (copy_from_bpfptr(p, src, len)) {
kvfree(p);
return ERR_PTR(-14);
}
return p;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count)
{
return strncpy_from_sockptr(dst, (sockptr_t) src, count);
}
# 26 "./include/linux/bpf.h" 2

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
struct module;
struct bpf_func_state;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;

typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
struct bpf_iter_seq_info {
const struct seq_operations *seq_ops;
bpf_iter_init_seq_priv_t init_seq_private;
bpf_iter_fini_seq_priv_t fini_seq_private;
u32 seq_priv_size;
};


struct bpf_map_ops {

int (*map_alloc_check)(union bpf_attr *attr);
struct bpf_map *(*map_alloc)(union bpf_attr *attr);
void (*map_release)(struct bpf_map *map, struct file *map_file);
void (*map_free)(struct bpf_map *map);
int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
void (*map_release_uref)(struct bpf_map *map);
void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
union bpf_attr *uattr);
int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
void *value, u64 flags);
int (*map_lookup_and_delete_batch)(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr *uattr);
int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
union bpf_attr *uattr);
int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
union bpf_attr *uattr);


void *(*map_lookup_elem)(struct bpf_map *map, void *key);
int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
int (*map_delete_elem)(struct bpf_map *map, void *key);
int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
int (*map_pop_elem)(struct bpf_map *map, void *value);
int (*map_peek_elem)(struct bpf_map *map, void *value);


void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
int fd);
void (*map_fd_put_ptr)(void *ptr);
int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
u32 (*map_fd_sys_lookup_elem)(void *ptr);
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
struct seq_file *m);
int (*map_check_btf)(const struct bpf_map *map,
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type);


int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
struct bpf_prog *new);


int (*map_direct_value_addr)(const struct bpf_map *map,
u64 *imm, u32 off);
int (*map_direct_value_meta)(const struct bpf_map *map,
u64 imm, u32 *off);
int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
struct poll_table_struct *pts);


int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
void *owner, u32 size);
void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
void *owner, u32 size);
struct bpf_local_storage ** (*map_owner_storage_ptr)(void *owner);


int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);
# 139 "./include/linux/bpf.h"
bool (*map_meta_equal)(const struct bpf_map *meta0,
const struct bpf_map *meta1);


int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee);
int (*map_for_each_callback)(struct bpf_map *map,
bpf_callback_t callback_fn,
void *callback_ctx, u64 flags);


const char * const map_btf_name;
int *map_btf_id;


const struct bpf_iter_seq_info *iter_seq_info;
};

struct bpf_map {



const struct bpf_map_ops *ops __attribute__((__aligned__((1 << 6))));
struct bpf_map *inner_map_meta;



enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
u64 map_extra;
u32 map_flags;
int spin_lock_off;
int timer_off;
u32 id;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
u32 btf_vmlinux_value_type_id;
struct btf *btf;



char name[16U];
bool bypass_spec_v1;
bool frozen;





atomic64_t refcnt __attribute__((__aligned__((1 << 6))));
atomic64_t usercnt;
struct work_struct work;
struct mutex freeze_mutex;
atomic64_t writecnt;





struct {
spinlock_t lock;
enum bpf_prog_type type;
bool jited;
bool xdp_has_frags;
} owner;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool map_value_has_spin_lock(const struct bpf_map *map)
{
return map->spin_lock_off >= 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool map_value_has_timer(const struct bpf_map *map)
{
return map->timer_off >= 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void check_and_init_map_value(struct bpf_map *map, void *dst)
{
if (__builtin_expect(!!(map_value_has_spin_lock(map)), 0))
memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
if (__builtin_expect(!!(map_value_has_timer(map)), 0))
memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
u32 s_off = 0, s_sz = 0, t_off = 0, t_sz = 0;

if (__builtin_expect(!!(map_value_has_spin_lock(map)), 0)) {
s_off = map->spin_lock_off;
s_sz = sizeof(struct bpf_spin_lock);
}
if (__builtin_expect(!!(map_value_has_timer(map)), 0)) {
t_off = map->timer_off;
t_sz = sizeof(struct bpf_timer);
}

if (__builtin_expect(!!(s_sz || t_sz), 0)) {
if (s_off < t_off || !s_sz) {
do { typeof(s_off) __tmp = (s_off); (s_off) = (t_off); (t_off) = __tmp; } while (0);
do { typeof(s_sz) __tmp = (s_sz); (s_sz) = (t_sz); (t_sz) = __tmp; } while (0);
}
memcpy(dst, src, t_off);
memcpy(dst + t_off + t_sz,
src + t_off + t_sz,
s_off - t_off - t_sz);
memcpy(dst + s_off + s_sz,
src + s_off + s_sz,
map->value_size - s_off - s_sz);
} else {
memcpy(dst, src, map->value_size);
}
}
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
int (*map_get_next_key)(struct bpf_offloaded_map *map,
void *key, void *next_key);
int (*map_lookup_elem)(struct bpf_offloaded_map *map,
void *key, void *value);
int (*map_update_elem)(struct bpf_offloaded_map *map,
void *key, void *value, u64 flags);
int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
struct bpf_map map;
struct net_device *netdev;
const struct bpf_map_dev_ops *dev_ops;
void *dev_priv;
struct list_head offloads;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
return ({ void *__mptr = (void *)(map); _Static_assert(__builtin_types_compatible_p(typeof(*(map)), typeof(((struct bpf_offloaded_map *)0)->map)) || __builtin_types_compatible_p(typeof(*(map)), typeof(void)), "pointer type mismatch in container_of()"); ((struct bpf_offloaded_map *)(__mptr - __builtin_offsetof(struct bpf_offloaded_map, map))); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_map_offload_neutral(const struct bpf_map *map)
{
return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_map_support_seq_show(const struct bpf_map *map)
{
return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;
# 320 "./include/linux/bpf.h"
enum bpf_type_flag {

PTR_MAYBE_NULL = ((((1UL))) << (0 + 8)),




MEM_RDONLY = ((((1UL))) << (1 + 8)),




MEM_ALLOC = ((((1UL))) << (2 + 8)),


MEM_USER = ((((1UL))) << (3 + 8)),







MEM_PERCPU = ((((1UL))) << (4 + 8)),

__BPF_TYPE_LAST_FLAG = MEM_PERCPU,
};
# 355 "./include/linux/bpf.h"
enum bpf_arg_type {
ARG_DONTCARE = 0,




ARG_CONST_MAP_PTR,
ARG_PTR_TO_MAP_KEY,
ARG_PTR_TO_MAP_VALUE,
ARG_PTR_TO_UNINIT_MAP_VALUE,




ARG_PTR_TO_MEM,
ARG_PTR_TO_UNINIT_MEM,




ARG_CONST_SIZE,
ARG_CONST_SIZE_OR_ZERO,

ARG_PTR_TO_CTX,
ARG_ANYTHING,
ARG_PTR_TO_SPIN_LOCK,
ARG_PTR_TO_SOCK_COMMON,
ARG_PTR_TO_INT,
ARG_PTR_TO_LONG,
ARG_PTR_TO_SOCKET,
ARG_PTR_TO_BTF_ID,
ARG_PTR_TO_ALLOC_MEM,
ARG_CONST_ALLOC_SIZE_OR_ZERO,
ARG_PTR_TO_BTF_ID_SOCK_COMMON,
ARG_PTR_TO_PERCPU_BTF_ID,
ARG_PTR_TO_FUNC,
ARG_PTR_TO_STACK,
ARG_PTR_TO_CONST_STR,
ARG_PTR_TO_TIMER,
__BPF_ARG_TYPE_MAX,


ARG_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
ARG_PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
ARG_PTR_TO_CTX_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
ARG_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM,
ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK,




__BPF_ARG_TYPE_LIMIT = (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1)),
};
_Static_assert(__BPF_ARG_TYPE_MAX <= (1UL << 8), "__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT");


enum bpf_return_type {
RET_INTEGER,
RET_VOID,
RET_PTR_TO_MAP_VALUE,
RET_PTR_TO_SOCKET,
RET_PTR_TO_TCP_SOCK,
RET_PTR_TO_SOCK_COMMON,
RET_PTR_TO_ALLOC_MEM,
RET_PTR_TO_MEM_OR_BTF_ID,
RET_PTR_TO_BTF_ID,
__BPF_RET_TYPE_MAX,


RET_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
RET_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_ALLOC | RET_PTR_TO_ALLOC_MEM,
RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,




__BPF_RET_TYPE_LIMIT = (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1)),
};
_Static_assert(__BPF_RET_TYPE_MAX <= (1UL << 8), "__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT");





struct bpf_func_proto {
u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool gpl_only;
bool pkt_access;
enum bpf_return_type ret_type;
union {
struct {
enum bpf_arg_type arg1_type;
enum bpf_arg_type arg2_type;
enum bpf_arg_type arg3_type;
enum bpf_arg_type arg4_type;
enum bpf_arg_type arg5_type;
};
enum bpf_arg_type arg_type[5];
};
union {
struct {
u32 *arg1_btf_id;
u32 *arg2_btf_id;
u32 *arg3_btf_id;
u32 *arg4_btf_id;
u32 *arg5_btf_id;
};
u32 *arg_btf_id[5];
};
int *ret_btf_id;
bool (*allowed)(const struct bpf_prog *prog);
};





struct bpf_context;

enum bpf_access_type {
BPF_READ = 1,
BPF_WRITE = 2
};
# 493 "./include/linux/bpf.h"
enum bpf_reg_type {
NOT_INIT = 0,
SCALAR_VALUE,
PTR_TO_CTX,
CONST_PTR_TO_MAP,
PTR_TO_MAP_VALUE,
PTR_TO_MAP_KEY,
PTR_TO_STACK,
PTR_TO_PACKET_META,
PTR_TO_PACKET,
PTR_TO_PACKET_END,
PTR_TO_FLOW_KEYS,
PTR_TO_SOCKET,
PTR_TO_SOCK_COMMON,
PTR_TO_TCP_SOCK,
PTR_TO_TP_BUFFER,
PTR_TO_XDP_SOCK,
# 520 "./include/linux/bpf.h"
PTR_TO_BTF_ID,




PTR_TO_MEM,
PTR_TO_BUF,
PTR_TO_FUNC,
__BPF_REG_TYPE_MAX,


PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET,
PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID,




__BPF_REG_TYPE_LIMIT = (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1)),
};
_Static_assert(__BPF_REG_TYPE_MAX <= (1UL << 8), "__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT");




struct bpf_insn_access_aux {
enum bpf_reg_type reg_type;
union {
int ctx_field_size;
struct {
struct btf *btf;
u32 btf_id;
};
};
struct bpf_verifier_log *log;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
aux->ctx_field_size = size;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_pseudo_func(const struct bpf_insn *insn)
{
return insn->code == (0x00 | 0x00 | 0x18) &&
insn->src_reg == 4;
}

struct bpf_prog_ops {
int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr *uattr);
};

struct bpf_verifier_ops {

const struct bpf_func_proto *
(*get_func_proto)(enum bpf_func_id func_id,
const struct bpf_prog *prog);




bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info);
int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
const struct bpf_prog *prog);
int (*gen_ld_abs)(const struct bpf_insn *orig,
struct bpf_insn *insn_buf);
u32 (*convert_ctx_access)(enum bpf_access_type type,
const struct bpf_insn *src,
struct bpf_insn *dst,
struct bpf_prog *prog, u32 *target_size);
int (*btf_struct_access)(struct bpf_verifier_log *log,
const struct btf *btf,
const struct btf_type *t, int off, int size,
enum bpf_access_type atype,
u32 *next_btf_id, enum bpf_type_flag *flag);
};

struct bpf_prog_offload_ops {

int (*insn_hook)(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx);
int (*finalize)(struct bpf_verifier_env *env);

int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
struct bpf_insn *insn);
int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);

int (*prepare)(struct bpf_prog *prog);
int (*translate)(struct bpf_prog *prog);
void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
struct bpf_prog *prog;
struct net_device *netdev;
struct bpf_offload_dev *offdev;
void *dev_priv;
struct list_head offloads;
bool dev_state;
bool opt_failed;
void *jited_image;
u32 jited_len;
};

enum bpf_cgroup_storage_type {
BPF_CGROUP_STORAGE_SHARED,
BPF_CGROUP_STORAGE_PERCPU,
__BPF_CGROUP_STORAGE_MAX
};
# 648 "./include/linux/bpf.h"
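/*
 * Distilled C function signature, derived from BTF, that the
 * trampoline code generators consume.  The 12-entry arg_size array is
 * the expanded MAX_BPF_FUNC_ARGS limit.
 */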
struct btf_func_model {
u8 ret_size;
u8 nr_args;
u8 arg_size[12];
};
# 679 "./include/linux/bpf.h"
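/* progs[38]: 38 is the expanded BPF_MAX_TRAMP_PROGS constant. */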
struct bpf_tramp_progs {
struct bpf_prog *progs[38];
int nr_progs;
};
# 704 "./include/linux/bpf.h"
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
const struct btf_func_model *m, u32 flags,
struct bpf_tramp_progs *tprogs,
void *orig_call);

u64 __attribute__((patchable_function_entry(0, 0))) __bpf_prog_enter(struct bpf_prog *prog);
void __attribute__((patchable_function_entry(0, 0))) __bpf_prog_exit(struct bpf_prog *prog, u64 start);
u64 __attribute__((patchable_function_entry(0, 0))) __bpf_prog_enter_sleepable(struct bpf_prog *prog);
void __attribute__((patchable_function_entry(0, 0))) __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
void __attribute__((patchable_function_entry(0, 0))) __bpf_tramp_enter(struct bpf_tramp_image *tr);
void __attribute__((patchable_function_entry(0, 0))) __bpf_tramp_exit(struct bpf_tramp_image *tr);

struct bpf_ksym {
unsigned long start;
unsigned long end;
char name[128];
struct list_head lnode;
struct latch_tree_node tnode;
bool prog;
};

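/*
 * BPF_TRAMP_REPLACE is deliberately placed after BPF_TRAMP_MAX: an
 * extension program replaces its target wholesale and is tracked via
 * bpf_trampoline::extension_prog rather than a progs_hlist slot.
 */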
enum bpf_tramp_prog_type {
BPF_TRAMP_FENTRY,
BPF_TRAMP_FEXIT,
BPF_TRAMP_MODIFY_RETURN,
BPF_TRAMP_MAX,
BPF_TRAMP_REPLACE,
};

struct bpf_tramp_image {
void *image;
struct bpf_ksym ksym;
struct percpu_ref pcref;
void *ip_after_call;
void *ip_epilogue;
union {
struct callback_head rcu;
struct work_struct work;
};
};

struct bpf_trampoline {

struct hlist_node hlist;

struct mutex mutex;
refcount_t refcnt;
u64 key;
struct {
struct btf_func_model model;
void *addr;
bool ftrace_managed;
} func;




struct bpf_prog *extension_prog;

struct hlist_head progs_hlist[BPF_TRAMP_MAX];

int progs_cnt[BPF_TRAMP_MAX];

struct bpf_tramp_image *cur_image;
u64 selector;
struct module *mod;
};

struct bpf_attach_target_info {
struct btf_func_model fmodel;
long tgt_addr;
const char *tgt_name;
const struct btf_type *tgt_type;
};



struct bpf_dispatcher_prog {
struct bpf_prog *prog;
refcount_t users;
};

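/*
 * A dispatcher multiplexes calls to a bounded set of programs through
 * a runtime-patched jump table; progs[48] is the expanded
 * BPF_DISPATCHER_MAX constant.
 */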
struct bpf_dispatcher {

struct mutex mutex;
void *func;
struct bpf_dispatcher_prog progs[48];
int num_progs;
void *image;
u32 image_off;
struct bpf_ksym ksym;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) __attribute__((__no_sanitize__("cfi"))) unsigned int bpf_dispatcher_nop_func(
const void *ctx,
const struct bpf_insn *insnsi,
unsigned int (*bpf_func)(const void *,
const struct bpf_insn *))
{
return bpf_func(ctx, insnsi);
}
# 859 "./include/linux/bpf.h"
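/*
 * The inline stubs below indicate that trampoline support is compiled
 * out in this configuration: -524 is the expanded -ENOTSUPP and
 * ERR_PTR(-95) is ERR_PTR(-EOPNOTSUPP).
 */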
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int bpf_trampoline_link_prog(struct bpf_prog *prog,
struct bpf_trampoline *tr)
{
return -524;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int bpf_trampoline_unlink_prog(struct bpf_prog *prog,
struct bpf_trampoline *tr)
{
return -524;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info)
{
return ERR_PTR(-95);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpf_trampoline_put(struct bpf_trampoline *tr) {}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
struct bpf_prog *from,
struct bpf_prog *to) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_bpf_image_address(unsigned long address)
{
return false;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
return false;
}


struct bpf_func_info_aux {
u16 linkage;
bool unreliable;
};

enum bpf_jit_poke_reason {
BPF_POKE_REASON_TAIL_CALL,
};


struct bpf_jit_poke_descriptor {
void *tailcall_target;
void *tailcall_bypass;
void *bypass_addr;
void *aux;
union {
struct {
struct bpf_map *map;
u32 key;
} tail_call;
};
bool tailcall_target_stable;
u8 adj_off;
u16 reason;
u32 insn_idx;
};


struct bpf_ctx_arg_aux {
u32 offset;
enum bpf_reg_type reg_type;
u32 btf_id;
};

struct btf_mod_pair {
struct btf *btf;
struct module *module;
};

struct bpf_kfunc_desc_tab;

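/*
 * Auxiliary data hanging off every bpf_prog: reference counting, the
 * maps and BTF objects the program uses, JIT bookkeeping, and the
 * attach-target description for tracing/extension programs.
 */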
struct bpf_prog_aux {
atomic64_t refcnt;
u32 used_map_cnt;
u32 used_btf_cnt;
u32 max_ctx_offset;
u32 max_pkt_offset;
u32 max_tp_access;
u32 stack_depth;
u32 id;
u32 func_cnt;
u32 func_idx;
u32 attach_btf_id;
u32 ctx_arg_info_size;
u32 max_rdonly_access;
u32 max_rdwr_access;
struct btf *attach_btf;
const struct bpf_ctx_arg_aux *ctx_arg_info;
struct mutex dst_mutex;
struct bpf_prog *dst_prog;
struct bpf_trampoline *dst_trampoline;
enum bpf_prog_type saved_dst_prog_type;
enum bpf_attach_type saved_dst_attach_type;
bool verifier_zext;
bool offload_requested;
bool attach_btf_trace;
bool func_proto_unreliable;
bool sleepable;
bool tail_call_reachable;
bool xdp_has_frags;
bool use_bpf_prog_pack;
struct hlist_node tramp_hlist;

const struct btf_type *attach_func_proto;

const char *attach_func_name;
struct bpf_prog **func;
void *jit_data;
struct bpf_jit_poke_descriptor *poke_tab;
struct bpf_kfunc_desc_tab *kfunc_tab;
struct bpf_kfunc_btf_tab *kfunc_btf_tab;
u32 size_poke_tab;
struct bpf_ksym ksym;
const struct bpf_prog_ops *ops;
struct bpf_map **used_maps;
struct mutex used_maps_mutex;
struct btf_mod_pair *used_btfs;
struct bpf_prog *prog;
struct user_struct *user;
u64 load_time;
u32 verified_insns;
struct bpf_map *cgroup_storage[__BPF_CGROUP_STORAGE_MAX];
char name[16U];



struct bpf_prog_offload *offload;
struct btf *btf;
struct bpf_func_info *func_info;
struct bpf_func_info_aux *func_info_aux;






struct bpf_line_info *linfo;







void **jited_linfo;
u32 func_info_cnt;
u32 nr_linfo;




u32 linfo_idx;
u32 num_exentries;
struct exception_table_entry *extable;
union {
struct work_struct work;
struct callback_head rcu;
};
};

struct bpf_array_aux {

struct list_head poke_progs;
struct bpf_map *map;
struct mutex poke_mutex;
struct work_struct work;
};

struct bpf_link {
atomic64_t refcnt;
u32 id;
enum bpf_link_type type;
const struct bpf_link_ops *ops;
struct bpf_prog *prog;
struct work_struct work;
};

struct bpf_link_ops {
void (*release)(struct bpf_link *link);
void (*dealloc)(struct bpf_link *link);
int (*detach)(struct bpf_link *link);
int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
struct bpf_prog *old_prog);
void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
int (*fill_link_info)(const struct bpf_link *link,
struct bpf_link_info *info);
};

struct bpf_link_primer {
struct bpf_link *link;
struct file *file;
int fd;
u32 id;
};

struct bpf_struct_ops_value;
struct btf_member;


struct bpf_struct_ops {
const struct bpf_verifier_ops *verifier_ops;
int (*init)(struct btf *btf);
int (*check_member)(const struct btf_type *t,
const struct btf_member *member);
int (*init_member)(const struct btf_type *t,
const struct btf_member *member,
void *kdata, const void *udata);
int (*reg)(void *kdata);
void (*unreg)(void *kdata);
const struct btf_type *type;
const struct btf_type *value_type;
const char *name;
struct btf_func_model func_models[64];
u32 type_id;
u32 value_id;
};
# 1122 "./include/linux/bpf.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
return ((void *)0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpf_struct_ops_init(struct btf *btf,
struct bpf_verifier_log *log)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_try_module_get(const void *data, struct module *owner)
{
return try_module_get(owner);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpf_module_put(const void *data, struct module *owner)
{
module_put(owner);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
void *key,
void *value)
{
return -22;
}


struct bpf_array {
struct bpf_map map;
u32 elem_size;
u32 index_mask;
struct bpf_array_aux *aux;
union {
char value[0] __attribute__((__aligned__(8)));
void *ptrs[0] __attribute__((__aligned__(8)));
void *pptrs[0] __attribute__((__aligned__(8)));
};
};
# 1169 "./include/linux/bpf.h"
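/*
 * (1UL << 0) and (1UL << 1) below are the expanded BPF_MAP_CAN_READ
 * and BPF_MAP_CAN_WRITE capability bits derived from the map's
 * rdonly/wronly program flags.
 */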
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);




if (access_flags & BPF_F_RDONLY_PROG)
return ((((1UL))) << (0));
else if (access_flags & BPF_F_WRONLY_PROG)
return ((((1UL))) << (1));
else
return ((((1UL))) << (0)) | ((((1UL))) << (1));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_map_flags_access_ok(u32 access_flags)
{
return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
(BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}

struct bpf_event_entry {
struct perf_event *event;
struct file *perf_file;
struct file *map_file;
struct callback_head rcu;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool map_type_contains_progs(struct bpf_map *map)
{
return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
map->map_type == BPF_MAP_TYPE_DEVMAP ||
map->map_type == BPF_MAP_TYPE_CPUMAP;
}

bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
const struct bpf_insn *src,
struct bpf_insn *dst,
struct bpf_prog *prog,
u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
# 1233 "./include/linux/bpf.h"
struct bpf_prog_array_item {
struct bpf_prog *prog;
union {
struct bpf_cgroup_storage *cgroup_storage[__BPF_CGROUP_STORAGE_MAX];
u64 bpf_cookie;
};
};

struct bpf_prog_array {
struct callback_head rcu;
struct bpf_prog_array_item items[];
};

struct bpf_empty_prog_array {
struct bpf_prog_array hdr;
struct bpf_prog *null_prog;
};







extern struct bpf_empty_prog_array bpf_empty_prog_array;

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
__u32 *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
struct bpf_prog *old_prog);
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
u32 *prog_ids, u32 request_cnt,
u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
struct bpf_prog *exclude_prog,
struct bpf_prog *include_prog,
u64 bpf_cookie,
struct bpf_prog_array **new_array);

struct bpf_run_ctx {};

struct bpf_cg_run_ctx {
struct bpf_run_ctx run_ctx;
const struct bpf_prog_array_item *prog_item;
int retval;
};

struct bpf_trace_run_ctx {
struct bpf_run_ctx run_ctx;
u64 bpf_cookie;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
{
struct bpf_run_ctx *old_ctx = ((void *)0);


old_ctx = get_current()->bpf_ctx;
get_current()->bpf_ctx = new_ctx;

return old_ctx;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
{

get_current()->bpf_ctx = old_ctx;

}






typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
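/*
 * The three BPF_PROG_RUN_ARRAY* helpers below share one shape: pin the
 * task to the CPU (migrate_disable), enter an RCU read section, swap
 * in a run context with bpf_set_run_ctx(), then walk the prog array.
 * The very long inline expressions are fully expanded rcu_dereference()
 * and READ_ONCE() macros left behind by the preprocessor.
 */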

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array *array_rcu,
const void *ctx, bpf_prog_run_fn run_prog,
int retval, u32 *ret_flags)
{
const struct bpf_prog_array_item *item;
const struct bpf_prog *prog;
const struct bpf_prog_array *array;
struct bpf_run_ctx *old_run_ctx;
struct bpf_cg_run_ctx run_ctx;
u32 func_ret;

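/*
 * Bit 0 of each program's return value is the allow/deny verdict; the
 * remaining bits are accumulated into *ret_flags.  On deny, retval is
 * forced to -1 (-EPERM) unless it already holds an errno (the -4095
 * comparison is the expanded IS_ERR_VALUE()/MAX_ERRNO check).
 */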
run_ctx.retval = retval;
migrate_disable();
rcu_read_lock();
array = ({ typeof(*(array_rcu)) *__UNIQUE_ID_rcu531 = (typeof(*(array_rcu)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_532(void) ; if (!((sizeof((array_rcu)) == sizeof(char) || sizeof((array_rcu)) == sizeof(short) || sizeof((array_rcu)) == sizeof(int) || sizeof((array_rcu)) == sizeof(long)) || sizeof((array_rcu)) == sizeof(long long))) __compiletime_assert_532(); } while (0); (*(const volatile typeof( _Generic(((array_rcu)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((array_rcu)))) *)&((array_rcu))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(array_rcu)) *)(__UNIQUE_ID_rcu531)); });
item = &array->items[0];
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
while ((prog = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_533(void) ; if (!((sizeof(item->prog) == sizeof(char) || sizeof(item->prog) == sizeof(short) || sizeof(item->prog) == sizeof(int) || sizeof(item->prog) == sizeof(long)) || sizeof(item->prog) == sizeof(long long))) __compiletime_assert_533(); } while (0); (*(const volatile typeof( _Generic((item->prog), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (item->prog))) *)&(item->prog)); }))) {
run_ctx.prog_item = item;
func_ret = run_prog(prog, ctx);
if (!(func_ret & 1) && !__builtin_expect(!!((unsigned long)(void *)((long)run_ctx.retval) >= (unsigned long)-4095), 0))
run_ctx.retval = -1;
*(ret_flags) |= (func_ret >> 1);
item++;
}
bpf_reset_run_ctx(old_run_ctx);
rcu_read_unlock();
migrate_enable();
return run_ctx.retval;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int
BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array *array_rcu,
const void *ctx, bpf_prog_run_fn run_prog,
int retval)
{
const struct bpf_prog_array_item *item;
const struct bpf_prog *prog;
const struct bpf_prog_array *array;
struct bpf_run_ctx *old_run_ctx;
struct bpf_cg_run_ctx run_ctx;

run_ctx.retval = retval;
migrate_disable();
rcu_read_lock();
array = ({ typeof(*(array_rcu)) *__UNIQUE_ID_rcu534 = (typeof(*(array_rcu)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_535(void) ; if (!((sizeof((array_rcu)) == sizeof(char) || sizeof((array_rcu)) == sizeof(short) || sizeof((array_rcu)) == sizeof(int) || sizeof((array_rcu)) == sizeof(long)) || sizeof((array_rcu)) == sizeof(long long))) __compiletime_assert_535(); } while (0); (*(const volatile typeof( _Generic(((array_rcu)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((array_rcu)))) *)&((array_rcu))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(array_rcu)) *)(__UNIQUE_ID_rcu534)); });
item = &array->items[0];
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
while ((prog = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_536(void) ; if (!((sizeof(item->prog) == sizeof(char) || sizeof(item->prog) == sizeof(short) || sizeof(item->prog) == sizeof(int) || sizeof(item->prog) == sizeof(long)) || sizeof(item->prog) == sizeof(long long))) __compiletime_assert_536(); } while (0); (*(const volatile typeof( _Generic((item->prog), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (item->prog))) *)&(item->prog)); }))) {
run_ctx.prog_item = item;
if (!run_prog(prog, ctx) && !__builtin_expect(!!((unsigned long)(void *)((long)run_ctx.retval) >= (unsigned long)-4095), 0))
run_ctx.retval = -1;
item++;
}
bpf_reset_run_ctx(old_run_ctx);
rcu_read_unlock();
migrate_enable();
return run_ctx.retval;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) u32
BPF_PROG_RUN_ARRAY(const struct bpf_prog_array *array_rcu,
const void *ctx, bpf_prog_run_fn run_prog)
{
const struct bpf_prog_array_item *item;
const struct bpf_prog *prog;
const struct bpf_prog_array *array;
struct bpf_run_ctx *old_run_ctx;
struct bpf_trace_run_ctx run_ctx;
u32 ret = 1;

migrate_disable();
rcu_read_lock();
array = ({ typeof(*(array_rcu)) *__UNIQUE_ID_rcu537 = (typeof(*(array_rcu)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_538(void) ; if (!((sizeof((array_rcu)) == sizeof(char) || sizeof((array_rcu)) == sizeof(short) || sizeof((array_rcu)) == sizeof(int) || sizeof((array_rcu)) == sizeof(long)) || sizeof((array_rcu)) == sizeof(long long))) __compiletime_assert_538(); } while (0); (*(const volatile typeof( _Generic(((array_rcu)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((array_rcu)))) *)&((array_rcu))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(array_rcu)) *)(__UNIQUE_ID_rcu537)); });
if (__builtin_expect(!!(!array), 0))
goto out;
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
item = &array->items[0];
while ((prog = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_539(void) ; if (!((sizeof(item->prog) == sizeof(char) || sizeof(item->prog) == sizeof(short) || sizeof(item->prog) == sizeof(int) || sizeof(item->prog) == sizeof(long)) || sizeof(item->prog) == sizeof(long long))) __compiletime_assert_539(); } while (0); (*(const volatile typeof( _Generic((item->prog), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (item->prog))) *)&(item->prog)); }))) {
run_ctx.bpf_cookie = item->bpf_cookie;
ret &= run_prog(prog, ctx);
item++;
}
bpf_reset_run_ctx(old_run_ctx);
out:
rcu_read_unlock();
migrate_enable();
return ret;
}
# 1448 "./include/linux/bpf.h"
extern __attribute__((section(".data..percpu" ""))) __typeof__(int) bpf_prog_active;
extern struct mutex bpf_stats_enabled_mutex;







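/*
 * The switch-on-sizeof blocks below are fully expanded
 * this_cpu_inc(bpf_prog_active) / this_cpu_dec(bpf_prog_active):
 * bumping the per-cpu counter (with IRQs disabled around the update)
 * keeps BPF programs from being invoked recursively while the kernel
 * manipulates BPF state on this CPU.
 */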
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpf_disable_instrumentation(void)
{
migrate_disable();
do { do { const void *__vpp_verify = (typeof((&(bpf_prog_active)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(bpf_prog_active)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(bpf_prog_active)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(bpf_prog_active))) *)(&(bpf_prog_active))); (typeof((typeof(*(&(bpf_prog_active))) *)(&(bpf_prog_active)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(bpf_prog_active)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(bpf_prog_active))) *)(&(bpf_prog_active))); (typeof((typeof(*(&(bpf_prog_active))) *)(&(bpf_prog_active)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(bpf_prog_active)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(bpf_prog_active))) *)(&(bpf_prog_active))); (typeof((typeof(*(&(bpf_prog_active))) *)(&(bpf_prog_active)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(bpf_prog_active)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(bpf_prog_active))) *)(&(bpf_prog_active))); (typeof((typeof(*(&(bpf_prog_active))) *)(&(bpf_prog_active)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpf_enable_instrumentation(void)
{
do { do { const void *__vpp_verify = (typeof((&(bpf_prog_active)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(bpf_prog_active)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(bpf_prog_active)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(bpf_prog_active))) *)(&(bpf_prog_active))); (typeof((typeof(*(&(bpf_prog_active))) *)(&(bpf_prog_active)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(bpf_prog_active))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(bpf_prog_active)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(bpf_prog_active))) *)(&(bpf_prog_active))); (typeof((typeof(*(&(bpf_prog_active))) *)(&(bpf_prog_active)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(bpf_prog_active))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(bpf_prog_active)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(bpf_prog_active))) *)(&(bpf_prog_active))); (typeof((typeof(*(&(bpf_prog_active))) *)(&(bpf_prog_active)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(bpf_prog_active))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(bpf_prog_active)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(bpf_prog_active))) *)(&(bpf_prog_active))); (typeof((typeof(*(&(bpf_prog_active))) *)(&(bpf_prog_active)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(bpf_prog_active))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
migrate_enable();
}

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;








# 1 "./include/linux/bpf_types.h" 1




extern const struct bpf_prog_ops sk_filter_prog_ops; extern const struct bpf_verifier_ops sk_filter_verifier_ops;

extern const struct bpf_prog_ops tc_cls_act_prog_ops; extern const struct bpf_verifier_ops tc_cls_act_verifier_ops;

extern const struct bpf_prog_ops tc_cls_act_prog_ops; extern const struct bpf_verifier_ops tc_cls_act_verifier_ops;

extern const struct bpf_prog_ops xdp_prog_ops; extern const struct bpf_verifier_ops xdp_verifier_ops;


extern const struct bpf_prog_ops cg_skb_prog_ops; extern const struct bpf_verifier_ops cg_skb_verifier_ops;

extern const struct bpf_prog_ops cg_sock_prog_ops; extern const struct bpf_verifier_ops cg_sock_verifier_ops;

extern const struct bpf_prog_ops cg_sock_addr_prog_ops; extern const struct bpf_verifier_ops cg_sock_addr_verifier_ops;


extern const struct bpf_prog_ops lwt_in_prog_ops; extern const struct bpf_verifier_ops lwt_in_verifier_ops;

extern const struct bpf_prog_ops lwt_out_prog_ops; extern const struct bpf_verifier_ops lwt_out_verifier_ops;

extern const struct bpf_prog_ops lwt_xmit_prog_ops; extern const struct bpf_verifier_ops lwt_xmit_verifier_ops;

extern const struct bpf_prog_ops lwt_seg6local_prog_ops; extern const struct bpf_verifier_ops lwt_seg6local_verifier_ops;

extern const struct bpf_prog_ops sock_ops_prog_ops; extern const struct bpf_verifier_ops sock_ops_verifier_ops;

extern const struct bpf_prog_ops sk_skb_prog_ops; extern const struct bpf_verifier_ops sk_skb_verifier_ops;

extern const struct bpf_prog_ops sk_msg_prog_ops; extern const struct bpf_verifier_ops sk_msg_verifier_ops;

extern const struct bpf_prog_ops flow_dissector_prog_ops; extern const struct bpf_verifier_ops flow_dissector_verifier_ops;



extern const struct bpf_prog_ops kprobe_prog_ops; extern const struct bpf_verifier_ops kprobe_verifier_ops;

extern const struct bpf_prog_ops tracepoint_prog_ops; extern const struct bpf_verifier_ops tracepoint_verifier_ops;

extern const struct bpf_prog_ops perf_event_prog_ops; extern const struct bpf_verifier_ops perf_event_verifier_ops;

extern const struct bpf_prog_ops raw_tracepoint_prog_ops; extern const struct bpf_verifier_ops raw_tracepoint_verifier_ops;

extern const struct bpf_prog_ops raw_tracepoint_writable_prog_ops; extern const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops;

extern const struct bpf_prog_ops tracing_prog_ops; extern const struct bpf_verifier_ops tracing_verifier_ops;



extern const struct bpf_prog_ops cg_dev_prog_ops; extern const struct bpf_verifier_ops cg_dev_verifier_ops;

extern const struct bpf_prog_ops cg_sysctl_prog_ops; extern const struct bpf_verifier_ops cg_sysctl_verifier_ops;

extern const struct bpf_prog_ops cg_sockopt_prog_ops; extern const struct bpf_verifier_ops cg_sockopt_verifier_ops;







extern const struct bpf_prog_ops sk_reuseport_prog_ops; extern const struct bpf_verifier_ops sk_reuseport_verifier_ops;

extern const struct bpf_prog_ops sk_lookup_prog_ops; extern const struct bpf_verifier_ops sk_lookup_verifier_ops;
# 80 "./include/linux/bpf_types.h"
extern const struct bpf_prog_ops bpf_syscall_prog_ops; extern const struct bpf_verifier_ops bpf_syscall_verifier_ops;


extern const struct bpf_map_ops array_map_ops;
extern const struct bpf_map_ops percpu_array_map_ops;
extern const struct bpf_map_ops prog_array_map_ops;
extern const struct bpf_map_ops perf_event_array_map_ops;

extern const struct bpf_map_ops cgroup_array_map_ops;


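/*
 * cgroup_storage_map_ops is declared twice because both
 * BPF_MAP_TYPE_CGROUP_STORAGE and BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
 * expand to the same ops symbol; a repeated extern declaration is
 * harmless C.
 */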
extern const struct bpf_map_ops cgroup_storage_map_ops;
extern const struct bpf_map_ops cgroup_storage_map_ops;

extern const struct bpf_map_ops htab_map_ops;
extern const struct bpf_map_ops htab_percpu_map_ops;
extern const struct bpf_map_ops htab_lru_map_ops;
extern const struct bpf_map_ops htab_lru_percpu_map_ops;
extern const struct bpf_map_ops trie_map_ops;

extern const struct bpf_map_ops stack_trace_map_ops;

extern const struct bpf_map_ops array_of_maps_map_ops;
extern const struct bpf_map_ops htab_of_maps_map_ops;



extern const struct bpf_map_ops task_storage_map_ops;

extern const struct bpf_map_ops dev_map_ops;
extern const struct bpf_map_ops dev_map_hash_ops;
extern const struct bpf_map_ops sk_storage_map_ops;
extern const struct bpf_map_ops cpu_map_ops;




extern const struct bpf_map_ops sock_map_ops;
extern const struct bpf_map_ops sock_hash_ops;
extern const struct bpf_map_ops reuseport_array_ops;


extern const struct bpf_map_ops queue_map_ops;
extern const struct bpf_map_ops stack_map_ops;



extern const struct bpf_map_ops ringbuf_map_ops;
extern const struct bpf_map_ops bloom_filter_map_ops;
# 1480 "./include/linux/bpf.h" 2




extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __attribute__((__warn_unused_result__)) bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __attribute__((__warn_unused_result__)) bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
bool bpf_map_write_active(const struct bpf_map *map);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int generic_map_lookup_batch(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr *uattr);
int generic_map_update_batch(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr *uattr);
int generic_map_delete_batch(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr *uattr);
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
# 1532 "./include/linux/bpf.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *
bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
int node)
{
return kmalloc_node(size, flags, node);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *
bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
return kzalloc(size, flags);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *
bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
gfp_t flags)
{
return __alloc_percpu_gfp(size, align, flags);
}


extern int sysctl_unprivileged_bpf_disabled;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_allow_ptr_leaks(void)
{
return perfmon_capable();
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_allow_uninit_stack(void)
{
return perfmon_capable();
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_allow_ptr_to_map_access(void)
{
return perfmon_capable();
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_bypass_spec_v1(void)
{
return perfmon_capable();
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_bypass_spec_v4(void)
{
return perfmon_capable();
}

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
const struct bpf_link_ops *ops, struct bpf_prog *prog);
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);

int bpf_obj_pin_user(u32 ufd, const char *pathname);
int bpf_obj_get_user(const char *pathname, int flags);






struct bpf_iter_aux_info {
struct bpf_map *map;
};

typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
union bpf_iter_link_info *linfo,
struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
struct seq_file *seq);
typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
struct bpf_link_info *info);
typedef const struct bpf_func_proto *
(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
const struct bpf_prog *prog);

enum bpf_iter_feature {
BPF_ITER_RESCHED = ((((1UL))) << (0)),
};


struct bpf_iter_reg {
const char *target;
bpf_iter_attach_target_t attach_target;
bpf_iter_detach_target_t detach_target;
bpf_iter_show_fdinfo_t show_fdinfo;
bpf_iter_fill_link_info_t fill_link_info;
bpf_iter_get_func_proto_t get_func_proto;
u32 ctx_arg_info_size;
u32 feature;
struct bpf_ctx_arg_aux ctx_arg_info[2];
const struct bpf_iter_seq_info *seq_info;
};

struct bpf_iter_meta {
union { struct seq_file * seq; __u64 :64; } __attribute__((aligned(8)));
u64 session_id;
u64 seq_num;
};

struct bpf_iter__bpf_map_elem {
union { struct bpf_iter_meta * meta; __u64 :64; } __attribute__((aligned(8)));
union { struct bpf_map * map; __u64 :64; } __attribute__((aligned(8)));
union { void * key; __u64 :64; } __attribute__((aligned(8)));
union { void * value; __u64 :64; } __attribute__((aligned(8)));
};

int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
const struct bpf_func_proto *
bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
struct seq_file *seq);
int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
struct bpf_link_info *info);

int map_set_for_each_callback_args(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
size_t actual_size);







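/*
 * Word-at-a-time copy used for per-cpu map values: callers are
 * expected to pass long-aligned buffers whose size is a multiple of
 * sizeof(long), since any remainder is silently dropped by the
 * division below.
 */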
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
const long *lsrc = src;
long *ldst = dst;

size /= sizeof(long);
while (size--)
*ldst++ = *lsrc++;
}


int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);


void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);


struct btf *bpf_get_btf_vmlinux(void);


struct xdp_frame;
struct sk_buff;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

void __dev_flush(void);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
struct net_device *dev_rx);
int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
struct bpf_map *map, bool exclude_ingress);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
struct bpf_prog *xdp_prog);
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
struct bpf_prog *xdp_prog, struct bpf_map *map,
bool exclude_ingress);

void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
struct net_device *dev_rx);
int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
struct sk_buff *skb);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
return (attr->map_flags & BPF_F_NUMA_NODE) ?
attr->numa_node : (-1);
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr *uattr);
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr *uattr);
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr *uattr);
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info);

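/*
 * Tracing programs see their context as an array of up to 12 u64
 * argument slots (sizeof(__u64) * 12); only aligned, in-bounds reads
 * are allowed.
 */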
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_tracing_ctx_access(int off, int size,
enum bpf_access_type type)
{
if (off < 0 || off >= sizeof(__u64) * 12)
return false;
if (type != BPF_READ)
return false;
if (off % size != 0)
return false;
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_tracing_btf_ctx_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
if (!bpf_tracing_ctx_access(off, size, type))
return false;
return btf_ctx_access(off, size, type, prog, info);
}

int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
const struct btf_type *t, int off, int size,
enum bpf_access_type atype,
u32 *next_btf_id, enum bpf_type_flag *flag);
bool btf_struct_ids_match(struct bpf_verifier_log *log,
const struct btf *btf, u32 id, int off,
const struct btf *need_btf, u32 need_type_id);

int btf_distill_func_proto(struct bpf_verifier_log *log,
struct btf *btf,
const struct btf_type *func_proto,
const char *func_name,
struct btf_func_model *m);

struct bpf_reg_state;
int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *regs);
int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
const struct btf *btf, u32 func_id,
struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *reg);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
struct btf *btf, const struct btf_type *t);

struct bpf_prog *bpf_prog_by_id(u32 id);
struct bpf_link *bpf_link_by_id(u32 id);

const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
void bpf_task_storage_free(struct task_struct *task);
bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
const struct bpf_insn *insn);
struct bpf_core_ctx {
struct bpf_verifier_log *log;
const struct btf *btf;
};

int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
int relo_idx, void *insn);

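/*
 * True when the kernel.unprivileged_bpf_disabled sysctl still permits
 * unprivileged use of the bpf() syscall.
 */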
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool unprivileged_ebpf_enabled(void)
{
return !sysctl_unprivileged_bpf_disabled;
}
# 2048 "./include/linux/bpf.h"
void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
struct btf_mod_pair *used_btfs, u32 len);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct bpf_prog *bpf_prog_get_type(u32 ufd,
enum bpf_prog_type type)
{
return bpf_prog_get_type_dev(ufd, type, false);
}

void __bpf_free_used_maps(struct bpf_prog_aux *aux,
struct bpf_map **used_maps, u32 len);

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);


int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
return aux->offload_requested;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_map_is_dev_bound(struct bpf_map *map)
{
return __builtin_expect(!!(map->ops == &bpf_map_offload_ops), 0);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr *uattr);

int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
int sock_map_bpf_prog_query(const union bpf_attr *attr,
union bpf_attr *uattr);

void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
# 2176 "./include/linux/bpf.h"
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
void *value, u64 map_flags);
# 2203 "./include/linux/bpf.h"
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_event_output_data_proto;
extern const struct bpf_func_proto bpf_ringbuf_output_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_snprintf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
extern const struct bpf_func_proto bpf_sock_from_file_proto;
extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
extern const struct bpf_func_proto bpf_find_vma_proto;
extern const struct bpf_func_proto bpf_loop_proto;
extern const struct bpf_func_proto bpf_strncmp_proto;
extern const struct bpf_func_proto bpf_copy_from_user_task_proto;

const struct bpf_func_proto *tracing_prog_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);


void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);


bool bpf_sock_common_is_valid_access(int off, int size,
enum bpf_access_type type,
struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog,
u32 *target_size);
# 2315 "./include/linux/bpf.h"
struct sk_reuseport_kern {
struct sk_buff *skb;
struct sock *sk;
struct sock *selected_sk;
struct sock *migrating_sk;
void *data_end;
u32 hash;
u32 reuseport_id;
bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog,
u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog,
u32 *target_size);
# 2375 "./include/linux/bpf.h"
enum bpf_text_poke_type {
BPF_MOD_CALL,
BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *addr1, void *addr2);

void *bpf_arch_text_copy(void *dst, void *src, size_t len);

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);



int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
u32 **bin_buf, u32 num_args);
void bpf_bprintf_cleanup(void);
# 10 "./include/linux/filter.h" 2

# 1 "./include/linux/compat.h" 1
# 18 "./include/linux/compat.h"
# 1 "./include/uapi/linux/aio_abi.h" 1
# 34 "./include/uapi/linux/aio_abi.h"
typedef __kernel_ulong_t aio_context_t;

enum {
IOCB_CMD_PREAD = 0,
IOCB_CMD_PWRITE = 1,
IOCB_CMD_FSYNC = 2,
IOCB_CMD_FDSYNC = 3,

IOCB_CMD_POLL = 5,
IOCB_CMD_NOOP = 6,
IOCB_CMD_PREADV = 7,
IOCB_CMD_PWRITEV = 8,
};
# 60 "./include/uapi/linux/aio_abi.h"
struct io_event {
__u64 data;
__u64 obj;
__s64 res;
__s64 res2;
};







struct iocb {

__u64 aio_data;


__u32 aio_key;
__kernel_rwf_t aio_rw_flags;
# 88 "./include/uapi/linux/aio_abi.h"
__u16 aio_lio_opcode;
__s16 aio_reqprio;
__u32 aio_fildes;

__u64 aio_buf;
__u64 aio_nbytes;
__s64 aio_offset;


__u64 aio_reserved2;


__u32 aio_flags;





__u32 aio_resfd;
};
# 19 "./include/linux/compat.h" 2

# 1 "./include/uapi/linux/unistd.h" 1







# 1 "./arch/riscv/include/asm/unistd.h" 1
# 14 "./arch/riscv/include/asm/unistd.h"
# 1 "./arch/riscv/include/uapi/asm/unistd.h" 1
# 25 "./arch/riscv/include/uapi/asm/unistd.h"
# 1 "./include/uapi/asm-generic/unistd.h" 1
# 26 "./arch/riscv/include/uapi/asm/unistd.h" 2
# 15 "./arch/riscv/include/asm/unistd.h" 2
# 9 "./include/uapi/linux/unistd.h" 2
# 21 "./include/linux/compat.h" 2

# 1 "./arch/riscv/include/generated/asm/compat.h" 1
# 1 "./include/asm-generic/compat.h" 1





typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
typedef s32 compat_clock_t;
typedef s32 compat_pid_t;
typedef u32 compat_ino_t;
typedef s32 compat_off_t;
typedef s64 compat_loff_t;
typedef s32 compat_daddr_t;
typedef s32 compat_timer_t;
typedef s32 compat_key_t;
typedef s16 compat_short_t;
typedef s32 compat_int_t;
typedef s32 compat_long_t;
typedef u16 compat_ushort_t;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
typedef u32 compat_uptr_t;
typedef u32 compat_caddr_t;
typedef u32 compat_aio_context_t;
typedef u32 compat_old_sigset_t;


typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;



typedef u32 compat_mode_t;






typedef s64 compat_s64;
typedef u64 compat_u64;



typedef u32 compat_sigset_word;
# 2 "./arch/riscv/include/generated/asm/compat.h" 2
# 23 "./include/linux/compat.h" 2
# 1 "./arch/riscv/include/generated/uapi/asm/siginfo.h" 1
# 24 "./include/linux/compat.h" 2
# 1 "./arch/riscv/include/generated/uapi/asm/signal.h" 1
# 25 "./include/linux/compat.h" 2
# 90 "./include/linux/compat.h"
struct compat_iovec {
compat_uptr_t iov_base;
compat_size_t iov_len;
};





typedef struct compat_sigaltstack {
compat_uptr_t ss_sp;
int ss_flags;
compat_size_t ss_size;
} compat_stack_t;
# 112 "./include/linux/compat.h"
typedef __compat_uid32_t compat_uid_t;
typedef __compat_gid32_t compat_gid_t;

struct compat_sel_arg_struct;
struct rusage;

struct old_itimerval32;

struct compat_tms {
compat_clock_t tms_utime;
compat_clock_t tms_stime;
compat_clock_t tms_cutime;
compat_clock_t tms_cstime;
};




typedef struct {
compat_sigset_word sig[(64 / 32)];
} compat_sigset_t;


int set_compat_user_sigmask(const compat_sigset_t *umask,
size_t sigsetsize);

struct compat_sigaction {

compat_uptr_t sa_handler;
compat_ulong_t sa_flags;







compat_sigset_t sa_mask __attribute__((__packed__));
};

typedef union compat_sigval {
compat_int_t sival_int;
compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo {
int si_signo;

int si_errno;
int si_code;





union {
int _pad[128/sizeof(int) - 3];


struct {
compat_pid_t _pid;
__compat_uid32_t _uid;
} _kill;


struct {
compat_timer_t _tid;
int _overrun;
compat_sigval_t _sigval;
} _timer;


struct {
compat_pid_t _pid;
__compat_uid32_t _uid;
compat_sigval_t _sigval;
} _rt;


struct {
compat_pid_t _pid;
__compat_uid32_t _uid;
int _status;
compat_clock_t _utime;
compat_clock_t _stime;
} _sigchld;
# 211 "./include/linux/compat.h"
struct {
compat_uptr_t _addr;


union {

int _trapno;




short int _addr_lsb;

struct {
char _dummy_bnd[(__alignof__(compat_uptr_t) < sizeof(short) ? sizeof(short) : __alignof__(compat_uptr_t))];
compat_uptr_t _lower;
compat_uptr_t _upper;
} _addr_bnd;

struct {
char _dummy_pkey[(__alignof__(compat_uptr_t) < sizeof(short) ? sizeof(short) : __alignof__(compat_uptr_t))];
u32 _pkey;
} _addr_pkey;

struct {
compat_ulong_t _data;
u32 _type;
} _perf;
};
} _sigfault;


struct {
compat_long_t _band;
int _fd;
} _sigpoll;

struct {
compat_uptr_t _call_addr;
int _syscall;
unsigned int _arch;
} _sigsys;
} _sifields;
} compat_siginfo_t;

struct compat_rlimit {
compat_ulong_t rlim_cur;
compat_ulong_t rlim_max;
};

struct compat_rusage {
struct old_timeval32 ru_utime;
struct old_timeval32 ru_stime;
compat_long_t ru_maxrss;
compat_long_t ru_ixrss;
compat_long_t ru_idrss;
compat_long_t ru_isrss;
compat_long_t ru_minflt;
compat_long_t ru_majflt;
compat_long_t ru_nswap;
compat_long_t ru_inblock;
compat_long_t ru_oublock;
compat_long_t ru_msgsnd;
compat_long_t ru_msgrcv;
compat_long_t ru_nsignals;
compat_long_t ru_nvcsw;
compat_long_t ru_nivcsw;
};

extern int put_compat_rusage(const struct rusage *,
struct compat_rusage *);

struct compat_siginfo;
struct __compat_aio_sigset;

struct compat_dirent {
u32 d_ino;
compat_off_t d_off;
u16 d_reclen;
char d_name[256];
};

struct compat_ustat {
compat_daddr_t f_tfree;
compat_ino_t f_tinode;
char f_fname[6];
char f_fpack[6];
};



typedef struct compat_sigevent {
compat_sigval_t sigev_value;
compat_int_t sigev_signo;
compat_int_t sigev_notify;
union {
compat_int_t _pad[((64/sizeof(int)) - 3)];
compat_int_t _tid;

struct {
compat_uptr_t _function;
compat_uptr_t _attribute;
} _sigev_thread;
} _sigev_un;
} compat_sigevent_t;

struct compat_ifmap {
compat_ulong_t mem_start;
compat_ulong_t mem_end;
unsigned short base_addr;
unsigned char irq;
unsigned char dma;
unsigned char port;
};

struct compat_if_settings {
unsigned int type;
unsigned int size;
compat_uptr_t ifs_ifsu;
};

struct compat_ifreq {
union {
char ifrn_name[16];
} ifr_ifrn;
union {
struct sockaddr ifru_addr;
struct sockaddr ifru_dstaddr;
struct sockaddr ifru_broadaddr;
struct sockaddr ifru_netmask;
struct sockaddr ifru_hwaddr;
short ifru_flags;
compat_int_t ifru_ivalue;
compat_int_t ifru_mtu;
struct compat_ifmap ifru_map;
char ifru_slave[16];
char ifru_newname[16];
compat_caddr_t ifru_data;
struct compat_if_settings ifru_settings;
} ifr_ifru;
};

struct compat_ifconf {
compat_int_t ifc_len;
compat_caddr_t ifcbuf;
};

struct compat_robust_list {
compat_uptr_t next;
};

struct compat_robust_list_head {
struct compat_robust_list list;
compat_long_t futex_offset;
compat_uptr_t list_op_pending;
};
# 377 "./include/linux/compat.h"
struct compat_keyctl_kdf_params {
compat_uptr_t hashname;
compat_uptr_t otherinfo;
__u32 otherinfolen;
__u32 __spare[8];
};

struct compat_stat;
struct compat_statfs;
struct compat_statfs64;
struct compat_old_linux_dirent;
struct compat_linux_dirent;
struct linux_dirent64;
struct compat_msghdr;
struct compat_mmsghdr;
struct compat_sysinfo;
struct compat_sysctl_args;
struct compat_kexec_segment;
struct compat_mq_attr;
struct compat_msgbuf;

void copy_siginfo_to_external32(struct compat_siginfo *to,
const struct kernel_siginfo *from);
int copy_siginfo_from_user32(kernel_siginfo_t *to,
const struct compat_siginfo *from);
int __copy_siginfo_to_user32(struct compat_siginfo *to,
const kernel_siginfo_t *from);



int get_compat_sigevent(struct sigevent *event,
const struct compat_sigevent *u_event);

extern int get_compat_sigset(sigset_t *set, const compat_sigset_t *compat);





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
put_compat_sigset(compat_sigset_t *compat, const sigset_t *set,
unsigned int size)
{
# 434 "./include/linux/compat.h"
return copy_to_user(compat, set, size) ? -14 : 0;

}
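/*
 * Gloss, not from the kernel tree: the bare -14 above is -EFAULT after
 * macro expansion; copy_to_user() returns the number of bytes it could
 * not copy, so any nonzero result maps to -EFAULT here.
 */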
# 505 "./include/linux/compat.h"
extern int compat_ptrace_request(struct task_struct *child,
compat_long_t request,
compat_ulong_t addr, compat_ulong_t data);

extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t addr, compat_ulong_t data);

struct epoll_event;

int compat_restore_altstack(const compat_stack_t *uss);
int __compat_save_altstack(compat_stack_t *, unsigned long);
# 539 "./include/linux/compat.h"
long compat_sys_io_setup(unsigned nr_reqs, u32 *ctx32p);
long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr,
u32 *iocb);
long compat_sys_io_pgetevents(compat_aio_context_t ctx_id,
compat_long_t min_nr,
compat_long_t nr,
struct io_event *events,
struct old_timespec32 *timeout,
const struct __compat_aio_sigset *usig);
long compat_sys_io_pgetevents_time64(compat_aio_context_t ctx_id,
compat_long_t min_nr,
compat_long_t nr,
struct io_event *events,
struct __kernel_timespec *timeout,
const struct __compat_aio_sigset *usig);


long compat_sys_lookup_dcookie(u32, u32, char *, compat_size_t);


long compat_sys_epoll_pwait(int epfd,
struct epoll_event *events,
int maxevents, int timeout,
const compat_sigset_t *sigmask,
compat_size_t sigsetsize);
long compat_sys_epoll_pwait2(int epfd,
struct epoll_event *events,
int maxevents,
const struct __kernel_timespec *timeout,
const compat_sigset_t *sigmask,
compat_size_t sigsetsize);


long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
compat_ulong_t arg);
long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
compat_ulong_t arg);


long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
compat_ulong_t arg);


long compat_sys_statfs(const char *pathname,
struct compat_statfs *buf);
long compat_sys_statfs64(const char *pathname,
compat_size_t sz,
struct compat_statfs64 *buf);
long compat_sys_fstatfs(unsigned int fd,
struct compat_statfs *buf);
long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
struct compat_statfs64 *buf);
long compat_sys_truncate(const char *, compat_off_t);
long compat_sys_ftruncate(unsigned int, compat_ulong_t);

long compat_sys_openat(int dfd, const char *filename,
int flags, umode_t mode);


long compat_sys_getdents(unsigned int fd,
struct compat_linux_dirent *dirent,
unsigned int count);


long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);

ssize_t compat_sys_preadv(compat_ulong_t fd,
const struct iovec *vec,
compat_ulong_t vlen, u32 pos_low, u32 pos_high);
ssize_t compat_sys_pwritev(compat_ulong_t fd,
const struct iovec *vec,
compat_ulong_t vlen, u32 pos_low, u32 pos_high);
# 624 "./include/linux/compat.h"
long compat_sys_sendfile(int out_fd, int in_fd,
compat_off_t *offset, compat_size_t count);
long compat_sys_sendfile64(int out_fd, int in_fd,
compat_loff_t *offset, compat_size_t count);


long compat_sys_pselect6_time32(int n, compat_ulong_t *inp,
compat_ulong_t *outp,
compat_ulong_t *exp,
struct old_timespec32 *tsp,
void *sig);
long compat_sys_pselect6_time64(int n, compat_ulong_t *inp,
compat_ulong_t *outp,
compat_ulong_t *exp,
struct __kernel_timespec *tsp,
void *sig);
long compat_sys_ppoll_time32(struct pollfd *ufds,
unsigned int nfds,
struct old_timespec32 *tsp,
const compat_sigset_t *sigmask,
compat_size_t sigsetsize);
long compat_sys_ppoll_time64(struct pollfd *ufds,
unsigned int nfds,
struct __kernel_timespec *tsp,
const compat_sigset_t *sigmask,
compat_size_t sigsetsize);


long compat_sys_signalfd4(int ufd,
const compat_sigset_t *sigmask,
compat_size_t sigsetsize, int flags);


long compat_sys_newfstatat(unsigned int dfd,
const char *filename,
struct compat_stat *statbuf,
int flag);
long compat_sys_newfstat(unsigned int fd,
struct compat_stat *statbuf);




long compat_sys_waitid(int, compat_pid_t,
struct compat_siginfo *, int,
struct compat_rusage *);




long
compat_sys_set_robust_list(struct compat_robust_list_head *head,
compat_size_t len);
long
compat_sys_get_robust_list(int pid, compat_uptr_t *head_ptr,
compat_size_t *len_ptr);


long compat_sys_getitimer(int which,
struct old_itimerval32 *it);
long compat_sys_setitimer(int which,
struct old_itimerval32 *in,
struct old_itimerval32 *out);


long compat_sys_kexec_load(compat_ulong_t entry,
compat_ulong_t nr_segments,
struct compat_kexec_segment *,
compat_ulong_t flags);


long compat_sys_timer_create(clockid_t which_clock,
struct compat_sigevent *timer_event_spec,
timer_t *created_timer_id);


long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
compat_long_t addr, compat_long_t data);


long compat_sys_sched_setaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t *user_mask_ptr);
long compat_sys_sched_getaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t *user_mask_ptr);


long compat_sys_sigaltstack(const compat_stack_t *uss_ptr,
compat_stack_t *uoss_ptr);
long compat_sys_rt_sigsuspend(compat_sigset_t *unewset,
compat_size_t sigsetsize);

long compat_sys_rt_sigaction(int,
const struct compat_sigaction *,
struct compat_sigaction *,
compat_size_t);

long compat_sys_rt_sigprocmask(int how, compat_sigset_t *set,
compat_sigset_t *oset,
compat_size_t sigsetsize);
long compat_sys_rt_sigpending(compat_sigset_t *uset,
compat_size_t sigsetsize);
long compat_sys_rt_sigtimedwait_time32(compat_sigset_t *uthese,
struct compat_siginfo *uinfo,
struct old_timespec32 *uts, compat_size_t sigsetsize);
long compat_sys_rt_sigtimedwait_time64(compat_sigset_t *uthese,
struct compat_siginfo *uinfo,
struct __kernel_timespec *uts, compat_size_t sigsetsize);
long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig,
struct compat_siginfo *uinfo);



long compat_sys_times(struct compat_tms *tbuf);
long compat_sys_getrlimit(unsigned int resource,
struct compat_rlimit *rlim);
long compat_sys_setrlimit(unsigned int resource,
struct compat_rlimit *rlim);
long compat_sys_getrusage(int who, struct compat_rusage *ru);


long compat_sys_gettimeofday(struct old_timeval32 *tv,
struct timezone *tz);
long compat_sys_settimeofday(struct old_timeval32 *tv,
struct timezone *tz);


long compat_sys_sysinfo(struct compat_sysinfo *info);


long compat_sys_mq_open(const char *u_name,
int oflag, compat_mode_t mode,
struct compat_mq_attr *u_attr);
long compat_sys_mq_notify(mqd_t mqdes,
const struct compat_sigevent *u_notification);
long compat_sys_mq_getsetattr(mqd_t mqdes,
const struct compat_mq_attr *u_mqstat,
struct compat_mq_attr *u_omqstat);


long compat_sys_msgctl(int first, int second, void *uptr);
long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg);
long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
compat_ssize_t msgsz, int msgflg);


long compat_sys_semctl(int semid, int semnum, int cmd, int arg);


long compat_sys_shmctl(int first, int second, void *uptr);
long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);


long compat_sys_recvfrom(int fd, void *buf, compat_size_t len,
unsigned flags, struct sockaddr *addr,
int *addrlen);
long compat_sys_sendmsg(int fd, struct compat_msghdr *msg,
unsigned flags);
long compat_sys_recvmsg(int fd, struct compat_msghdr *msg,
unsigned int flags);




long compat_sys_keyctl(u32 option,
u32 arg2, u32 arg3, u32 arg4, u32 arg5);


long compat_sys_execve(const char *filename, const compat_uptr_t *argv,
const compat_uptr_t *envp);




long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
compat_pid_t pid, int sig,
struct compat_siginfo *uinfo);
long compat_sys_recvmmsg_time64(int fd, struct compat_mmsghdr *mmsg,
unsigned vlen, unsigned int flags,
struct __kernel_timespec *timeout);
long compat_sys_recvmmsg_time32(int fd, struct compat_mmsghdr *mmsg,
unsigned vlen, unsigned int flags,
struct old_timespec32 *timeout);
long compat_sys_wait4(compat_pid_t pid,
compat_uint_t *stat_addr, int options,
struct compat_rusage *ru);
long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
int, const char *);
long compat_sys_open_by_handle_at(int mountdirfd,
struct file_handle *handle,
int flags);
long compat_sys_sendmmsg(int fd, struct compat_mmsghdr *mmsg,
unsigned vlen, unsigned int flags);
long compat_sys_execveat(int dfd, const char *filename,
const compat_uptr_t *argv,
const compat_uptr_t *envp, int flags);
ssize_t compat_sys_preadv2(compat_ulong_t fd,
const struct iovec *vec,
compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
ssize_t compat_sys_pwritev2(compat_ulong_t fd,
const struct iovec *vec,
compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
# 847 "./include/linux/compat.h"
long compat_sys_open(const char *filename, int flags,
umode_t mode);


long compat_sys_signalfd(int ufd,
const compat_sigset_t *sigmask,
compat_size_t sigsetsize);


long compat_sys_newstat(const char *filename,
struct compat_stat *statbuf);
long compat_sys_newlstat(const char *filename,
struct compat_stat *statbuf);


long compat_sys_select(int n, compat_ulong_t *inp,
compat_ulong_t *outp, compat_ulong_t *exp,
struct old_timeval32 *tvp);
long compat_sys_ustat(unsigned dev, struct compat_ustat *u32);
long compat_sys_recv(int fd, void *buf, compat_size_t len,
unsigned flags);


long compat_sys_old_readdir(unsigned int fd,
struct compat_old_linux_dirent *,
unsigned int count);


long compat_sys_old_select(struct compat_sel_arg_struct *arg);


long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
# 896 "./include/linux/compat.h"
long compat_sys_socketcall(int call, u32 *args);
# 906 "./include/linux/compat.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct old_timeval32 ns_to_old_timeval32(s64 nsec)
{
struct __kernel_old_timeval tv;
struct old_timeval32 ctv;

tv = ns_to_kernel_old_timeval(nsec);
ctv.tv_sec = tv.tv_sec;
ctv.tv_usec = tv.tv_usec;

return ctv;
}







int kcompat_sys_statfs64(const char * pathname, compat_size_t sz,
struct compat_statfs64 * buf);
int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
struct compat_statfs64 * buf);
# 945 "./include/linux/compat.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool in_compat_syscall(void) { return false; }







long compat_get_bitmap(unsigned long *mask, const compat_ulong_t *umask,
unsigned long bitmap_size);
long compat_put_bitmap(compat_ulong_t *umask, unsigned long *mask,
unsigned long bitmap_size);
# 974 "./include/linux/compat.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *compat_ptr(compat_uptr_t uptr)
{
return (void *)(unsigned long)uptr;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) compat_uptr_t ptr_to_compat(void *uptr)
{
return (u32)(unsigned long)uptr;
}
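/*
 * Minimal sketch, not part of the kernel headers: compat_ptr()
 * zero-extends a 32-bit user pointer to a 64-bit kernel pointer and
 * ptr_to_compat() truncates it back, so the round trip below is the
 * identity for any 32-bit address. The example_ name is hypothetical.
 */
static inline __attribute__((__unused__)) compat_uptr_t example_compat_ptr_roundtrip(compat_uptr_t uptr)
{
return ptr_to_compat(compat_ptr(uptr)); /* always equals uptr */
}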
# 12 "./include/linux/filter.h" 2






# 1 "./include/linux/set_memory.h" 1








# 1 "./arch/riscv/include/asm/set_memory.h" 1
# 14 "./arch/riscv/include/asm/set_memory.h"
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_rw_nx(unsigned long addr, int numpages);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int set_kernel_memory(char *startp, char *endp,
int (*set_memory)(unsigned long start,
int num_pages))
{
unsigned long start = (unsigned long)startp;
unsigned long end = (unsigned long)endp;
int num_pages = ((((end - start)) + ((typeof((end - start)))((((1UL) << (12)))) - 1)) & ~((typeof((end - start)))((((1UL) << (12)))) - 1)) >> (12);

return set_memory(start, num_pages);
}
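/*
 * Gloss, not from the kernel tree: the num_pages expression above is
 * ALIGN(end - start, PAGE_SIZE) >> PAGE_SHIFT with 4 KiB pages, i.e.
 * round the byte span up to a page multiple, then convert to a page
 * count. For example, a 5000-byte span gives (5000 + 4095) & ~4095 =
 * 8192 bytes, hence 2 pages.
 */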
# 43 "./arch/riscv/include/asm/set_memory.h"
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
bool kernel_page_present(struct page *page);
# 10 "./include/linux/set_memory.h" 2
# 37 "./include/linux/set_memory.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool can_set_direct_map(void)
{
return true;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int set_mce_nospec(unsigned long pfn, bool unmap)
{
return 0;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int clear_mce_nospec(unsigned long pfn)
{
return 0;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int set_memory_encrypted(unsigned long addr, int numpages)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int set_memory_decrypted(unsigned long addr, int numpages)
{
return 0;
}
# 19 "./include/linux/filter.h" 2

# 1 "./include/linux/if_vlan.h" 1
# 11 "./include/linux/if_vlan.h"
# 1 "./include/linux/etherdevice.h" 1
# 23 "./include/linux/etherdevice.h"
# 1 "./include/linux/crc32.h" 1








# 1 "./include/linux/bitrev.h" 1
# 15 "./include/linux/bitrev.h"
extern u8 const byte_rev_table[256];
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u8 __bitrev8(u8 byte)
{
return byte_rev_table[byte];
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u16 __bitrev16(u16 x)
{
return (__bitrev8(x & 0xff) << 8) | __bitrev8(x >> 8);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 __bitrev32(u32 x)
{
return (__bitrev16(x & 0xffff) << 16) | __bitrev16(x >> 16);
}
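/*
 * Gloss, not from the kernel tree: bit reversal is built up from a
 * 256-entry byte table -- __bitrev8(0x01) == 0x80 -- and the wider
 * helpers swap the halves as well, so __bitrev16(0x0001) == 0x8000
 * and __bitrev32(0x00000001) == 0x80000000.
 */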
# 10 "./include/linux/crc32.h" 2

u32 __attribute__((__pure__)) crc32_le(u32 crc, unsigned char const *p, size_t len);
u32 __attribute__((__pure__)) crc32_be(u32 crc, unsigned char const *p, size_t len);
# 32 "./include/linux/crc32.h"
u32 __attribute__((__const__)) crc32_le_shift(u32 crc, size_t len);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
{
return crc32_le_shift(crc1, len2) ^ crc2;
}

u32 __attribute__((__pure__)) __crc32c_le(u32 crc, unsigned char const *p, size_t len);
# 59 "./include/linux/crc32.h"
u32 __attribute__((__const__)) __crc32c_le_shift(u32 crc, size_t len);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2)
{
return __crc32c_le_shift(crc1, len2) ^ crc2;
}
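/*
 * Gloss, not from the kernel tree: by CRC linearity, shifting crc1
 * through len2 zero bytes and XOR-ing in crc2 yields the CRC of the
 * concatenated buffers -- i.e., assuming crc2 was computed over seq2
 * with a seed of 0, crc32_le_combine(crc1, crc2, len2) should equal
 * crc32_le(crc1, seq2, len2); likewise for the crc32c variant.
 */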
# 24 "./include/linux/etherdevice.h" 2
# 1 "./arch/riscv/include/generated/asm/unaligned.h" 1
# 1 "./include/asm-generic/unaligned.h" 1
# 25 "./include/asm-generic/unaligned.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u16 get_unaligned_le16(const void *p)
{
return (( __u16)(__le16)(({ const struct { __le16 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; })));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 get_unaligned_le32(const void *p)
{
return (( __u32)(__le32)(({ const struct { __le32 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; })));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 get_unaligned_le64(const void *p)
{
return (( __u64)(__le64)(({ const struct { __le64 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; })));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_unaligned_le16(u16 val, void *p)
{
do { struct { __le16 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x = ((( __le16)(__u16)(val))); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_unaligned_le32(u32 val, void *p)
{
do { struct { __le32 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x = ((( __le32)(__u32)(val))); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_unaligned_le64(u64 val, void *p)
{
do { struct { __le64 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x = ((( __le64)(__u64)(val))); } while (0);
}
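/*
 * Minimal sketch, not part of the kernel headers: the helpers above
 * wrap each access in a packed single-member struct, which tells the
 * compiler the pointer may be misaligned so it emits byte-safe loads
 * and stores instead of a potentially trapping aligned access. The
 * example_ name is hypothetical.
 */
static inline __attribute__((__unused__)) u16 example_get_unaligned_le16(void)
{
u8 buf[3] = { 0x00, 0x34, 0x12 };
return get_unaligned_le16(buf + 1); /* 0x1234, despite the odd address */
}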

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u16 get_unaligned_be16(const void *p)
{
return (__builtin_constant_p((__u16)(( __u16)(__be16)(({ const struct { __be16 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; })))) ? ((__u16)( (((__u16)(( __u16)(__be16)(({ const struct { __be16 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; }))) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(({ const struct { __be16 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; }))) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(({ const struct { __be16 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; }))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 get_unaligned_be32(const void *p)
{
return (__builtin_constant_p((__u32)(( __u32)(__be32)(({ const struct { __be32 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; })))) ? ((__u32)( (((__u32)(( __u32)(__be32)(({ const struct { __be32 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; }))) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(({ const struct { __be32 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; }))) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(({ const struct { __be32 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; }))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(({ const struct { __be32 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; }))) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(({ const struct { __be32 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; }))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 get_unaligned_be64(const void *p)
{
return (__builtin_constant_p((__u64)(( __u64)(__be64)(({ const struct { __be64 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; })))) ? ((__u64)( (((__u64)(( __u64)(__be64)(({ const struct { __be64 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; }))) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(({ const struct { __be64 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; }))) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(({ const struct { __be64 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; }))) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(({ const struct { __be64 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; }))) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(({ const struct { __be64 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; }))) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(({ const struct { __be64 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; }))) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(({ const struct { __be64 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; }))) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(({ const struct { __be64 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; }))) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(({ const struct { __be64 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x; }))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_unaligned_be16(u16 val, void *p)
{
do { struct { __be16 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x = ((( __be16)(__builtin_constant_p((__u16)((val))) ? ((__u16)( (((__u16)((val)) & (__u16)0x00ffU) << 8) | (((__u16)((val)) & (__u16)0xff00U) >> 8))) : __fswab16((val))))); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_unaligned_be32(u32 val, void *p)
{
do { struct { __be32 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x = ((( __be32)(__builtin_constant_p((__u32)((val))) ? ((__u32)( (((__u32)((val)) & (__u32)0x000000ffUL) << 24) | (((__u32)((val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((val)) & (__u32)0xff000000UL) >> 24))) : __fswab32((val))))); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_unaligned_be64(u64 val, void *p)
{
do { struct { __be64 x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(p); __pptr->x = ((( __be64)(__builtin_constant_p((__u64)((val))) ? ((__u64)( (((__u64)((val)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)((val)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)((val)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)((val)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)((val)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)((val)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)((val)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)((val)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64((val))))); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 __get_unaligned_be24(const u8 *p)
{
return p[0] << 16 | p[1] << 8 | p[2];
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 get_unaligned_be24(const void *p)
{
return __get_unaligned_be24(p);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 __get_unaligned_le24(const u8 *p)
{
return p[0] | p[1] << 8 | p[2] << 16;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 get_unaligned_le24(const void *p)
{
return __get_unaligned_le24(p);
}
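/*
 * Gloss, not from the kernel tree: there is no 24-bit integer type, so
 * the *_24 helpers assemble three bytes into a u32 by shifts -- e.g.
 * get_unaligned_le24() on bytes { 0x56, 0x34, 0x12 } returns 0x123456.
 */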

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __put_unaligned_be24(const u32 val, u8 *p)
{
*p++ = val >> 16;
*p++ = val >> 8;
*p++ = val;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_unaligned_be24(const u32 val, void *p)
{
__put_unaligned_be24(val, p);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __put_unaligned_le24(const u32 val, u8 *p)
{
*p++ = val;
*p++ = val >> 8;
*p++ = val >> 16;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_unaligned_le24(const u32 val, void *p)
{
__put_unaligned_le24(val, p);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __put_unaligned_be48(const u64 val, __u8 *p)
{
*p++ = val >> 40;
*p++ = val >> 32;
*p++ = val >> 24;
*p++ = val >> 16;
*p++ = val >> 8;
*p++ = val;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void put_unaligned_be48(const u64 val, void *p)
{
__put_unaligned_be48(val, p);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 __get_unaligned_be48(const u8 *p)
{
return (u64)p[0] << 40 | (u64)p[1] << 32 | (u64)p[2] << 24 |
p[3] << 16 | p[4] << 8 | p[5];
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 get_unaligned_be48(const void *p)
{
return __get_unaligned_be48(p);
}
# 2 "./arch/riscv/include/generated/asm/unaligned.h" 2
# 25 "./include/linux/etherdevice.h" 2



struct device;
struct fwnode_handle;

int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
int platform_get_ethdev_address(struct device *dev, struct net_device *netdev);
unsigned char *arch_get_platform_mac_address(void);
int nvmem_get_mac_address(struct device *dev, void *addrbuf);
int device_get_mac_address(struct device *dev, char *addr);
int device_get_ethdev_address(struct device *dev, struct net_device *netdev);
int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr);

u32 eth_get_headlen(const struct net_device *dev, const void *data, u32 len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;

int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
const void *daddr, const void *saddr, unsigned len);
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
__be16 type);
void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
const unsigned char *haddr);
__be16 eth_header_parse_protocol(const struct sk_buff *skb);
int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
void eth_commit_mac_addr_change(struct net_device *dev, void *p);
int eth_mac_addr(struct net_device *dev, void *p);
int eth_validate_addr(struct net_device *dev);

struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
unsigned int rxqs);



struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
unsigned int txqs,
unsigned int rxqs);


struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb);
int eth_gro_complete(struct sk_buff *skb, int nhoff);


static const u8 eth_reserved_addr_base[6] __attribute__((__aligned__(2))) =
{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
# 83 "./include/linux/etherdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_link_local_ether_addr(const u8 *addr)
{
__be16 *a = (__be16 *)addr;
static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
static const __be16 m = (( __be16)(__builtin_constant_p((__u16)((0xfff0))) ? ((__u16)( (((__u16)((0xfff0)) & (__u16)0x00ffU) << 8) | (((__u16)((0xfff0)) & (__u16)0xff00U) >> 8))) : __fswab16((0xfff0))));





return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;

}
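/*
 * Gloss, not from the kernel tree: the mask above is htons(0xfff0)
 * after macro expansion, so the low nibble of the last octet is
 * ignored and the test matches the IEEE 802.1 reserved range
 * 01:80:c2:00:00:00 through 01:80:c2:00:00:0f (STP, pause frames,
 * LLDP, and friends).
 */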
# 105 "./include/linux/etherdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_zero_ether_addr(const u8 *addr)
{



return (*(const u16 *)(addr + 0) |
*(const u16 *)(addr + 2) |
*(const u16 *)(addr + 4)) == 0;

}
# 123 "./include/linux/etherdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_multicast_ether_addr(const u8 *addr)
{



u16 a = *(const u16 *)addr;




return 0x01 & a;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_multicast_ether_addr_64bits(const u8 *addr)
{







return is_multicast_ether_addr(addr);

}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_local_ether_addr(const u8 *addr)
{
return 0x02 & addr[0];
}
# 169 "./include/linux/etherdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_broadcast_ether_addr(const u8 *addr)
{
return (*(const u16 *)(addr + 0) &
*(const u16 *)(addr + 2) &
*(const u16 *)(addr + 4)) == 0xffff;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_unicast_ether_addr(const u8 *addr)
{
return !is_multicast_ether_addr(addr);
}
# 198 "./include/linux/etherdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_valid_ether_addr(const u8 *addr)
{


return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
}
# 213 "./include/linux/etherdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool eth_proto_is_802_3(__be16 proto)
{


proto &= (( __be16)(__builtin_constant_p((__u16)((0xFF00))) ? ((__u16)( (((__u16)((0xFF00)) & (__u16)0x00ffU) << 8) | (((__u16)((0xFF00)) & (__u16)0xff00U) >> 8))) : __fswab16((0xFF00))));


return ( u16)proto >= ( u16)(( __be16)(__builtin_constant_p((__u16)((0x0600))) ? ((__u16)( (((__u16)((0x0600)) & (__u16)0x00ffU) << 8) | (((__u16)((0x0600)) & (__u16)0xff00U) >> 8))) : __fswab16((0x0600))));
}
# 230 "./include/linux/etherdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void eth_random_addr(u8 *addr)
{
get_random_bytes(addr, 6);
addr[0] &= 0xfe;
addr[0] |= 0x02;
}
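/*
 * Gloss, not from the kernel tree: after filling the 6 bytes with
 * random data, "addr[0] &= 0xfe" clears the multicast (I/G) bit and
 * "addr[0] |= 0x02" sets the locally-administered (U/L) bit, so the
 * result is always a valid random unicast address.
 */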







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void eth_broadcast_addr(u8 *addr)
{
memset(addr, 0xff, 6);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void eth_zero_addr(u8 *addr)
{
memset(addr, 0x00, 6);
}
# 268 "./include/linux/etherdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void eth_hw_addr_random(struct net_device *dev)
{
u8 addr[6];

eth_random_addr(addr);
__dev_addr_set(dev, addr, 6);
dev->addr_assign_type = 1;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 eth_hw_addr_crc(struct netdev_hw_addr *ha)
{
return ({ u32 __x = crc32_le(~0, ha->addr, 6); __builtin_constant_p(__x) ? ({ u32 ___x = __x; ___x = (___x >> 16) | (___x << 16); ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); ___x; }) : __bitrev32(__x); });
}
# 295 "./include/linux/etherdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ether_addr_copy(u8 *dst, const u8 *src)
{




u16 *a = (u16 *)dst;
const u16 *b = (const u16 *)src;

a[0] = b[0];
a[1] = b[1];
a[2] = b[2];

}
# 317 "./include/linux/etherdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
{
__dev_addr_set(dev, addr, 6);
}
# 330 "./include/linux/etherdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void eth_hw_addr_inherit(struct net_device *dst,
struct net_device *src)
{
dst->addr_assign_type = src->addr_assign_type;
eth_hw_addr_set(dst, src->dev_addr);
}
# 346 "./include/linux/etherdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
{






const u16 *a = (const u16 *)addr1;
const u16 *b = (const u16 *)addr2;

return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;

}
# 375 "./include/linux/etherdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
{
# 386 "./include/linux/etherdevice.h"
return ether_addr_equal(addr1, addr2);

}
# 399 "./include/linux/etherdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2)
{



return memcmp(addr1, addr2, 6) == 0;

}
# 418 "./include/linux/etherdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
const u8 *mask)
{
int i;

for (i = 0; i < 6; i++) {
if ((addr1[i] ^ addr2[i]) & mask[i])
return false;
}

return true;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 ether_addr_to_u64(const u8 *addr)
{
u64 u = 0;
int i;

for (i = 0; i < 6; i++)
u = u << 8 | addr[i];

return u;
}
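/*
 * Minimal sketch, not part of the kernel headers: ether_addr_to_u64()
 * packs the 6 address bytes big-endian into the low 48 bits of a u64,
 * which is what lets eth_addr_dec()/eth_addr_inc() below work by plain
 * integer arithmetic. The example_ name is hypothetical.
 */
static inline __attribute__((__unused__)) u64 example_ether_addr_to_u64(void)
{
const u8 mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
return ether_addr_to_u64(mac); /* 0x001122334455 */
}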






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void u64_to_ether_addr(u64 u, u8 *addr)
{
int i;

for (i = 6 - 1; i >= 0; i--) {
addr[i] = u & 0xff;
u = u >> 8;
}
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void eth_addr_dec(u8 *addr)
{
u64 u = ether_addr_to_u64(addr);

u--;
u64_to_ether_addr(u, addr);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void eth_addr_inc(u8 *addr)
{
u64 u = ether_addr_to_u64(addr);

u++;
u64_to_ether_addr(u, addr);
}
# 499 "./include/linux/etherdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_etherdev_addr(const struct net_device *dev,
const u8 addr[6 + 2])
{
struct netdev_hw_addr *ha;
bool res = false;

rcu_read_lock();
for (({ ; }), ha = ({ void *__mptr = (void *)(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_540(void) ; if (!((sizeof((&dev->dev_addrs.list)->next) == sizeof(char) || sizeof((&dev->dev_addrs.list)->next) == sizeof(short) || sizeof((&dev->dev_addrs.list)->next) == sizeof(int) || sizeof((&dev->dev_addrs.list)->next) == sizeof(long)) || sizeof((&dev->dev_addrs.list)->next) == sizeof(long long))) __compiletime_assert_540(); } while (0); (*(const volatile typeof( _Generic(((&dev->dev_addrs.list)->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&dev->dev_addrs.list)->next))) *)&((&dev->dev_addrs.list)->next)); })); _Static_assert(__builtin_types_compatible_p(typeof(*(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_540(void) ; if (!((sizeof((&dev->dev_addrs.list)->next) == sizeof(char) || sizeof((&dev->dev_addrs.list)->next) == sizeof(short) || sizeof((&dev->dev_addrs.list)->next) == sizeof(int) || sizeof((&dev->dev_addrs.list)->next) == sizeof(long)) || sizeof((&dev->dev_addrs.list)->next) == sizeof(long long))) __compiletime_assert_540(); } while (0); (*(const volatile typeof( _Generic(((&dev->dev_addrs.list)->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&dev->dev_addrs.list)->next))) *)&((&dev->dev_addrs.list)->next)); }))), typeof(((typeof(*ha) *)0)->list)) || __builtin_types_compatible_p(typeof(*(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_540(void) ; if (!((sizeof((&dev->dev_addrs.list)->next) == sizeof(char) || sizeof((&dev->dev_addrs.list)->next) == sizeof(short) || sizeof((&dev->dev_addrs.list)->next) == sizeof(int) || sizeof((&dev->dev_addrs.list)->next) == sizeof(long)) || sizeof((&dev->dev_addrs.list)->next) == sizeof(long long))) __compiletime_assert_540(); } while (0); (*(const volatile typeof( _Generic(((&dev->dev_addrs.list)->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&dev->dev_addrs.list)->next))) *)&((&dev->dev_addrs.list)->next)); }))), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*ha) *)(__mptr - __builtin_offsetof(typeof(*ha), list))); }); &ha->list != (&dev->dev_addrs.list); ha = ({ void *__mptr = (void *)(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_541(void) ; if (!((sizeof(ha->list.next) == sizeof(char) || sizeof(ha->list.next) == sizeof(short) || sizeof(ha->list.next) == sizeof(int) || sizeof(ha->list.next) == sizeof(long)) || sizeof(ha->list.next) == sizeof(long long))) __compiletime_assert_541(); } while (0); (*(const volatile typeof( _Generic((ha->list.next), char: (char)0, unsigned char: (unsigned char)0, 
signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (ha->list.next))) *)&(ha->list.next)); })); _Static_assert(__builtin_types_compatible_p(typeof(*(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_541(void) ; if (!((sizeof(ha->list.next) == sizeof(char) || sizeof(ha->list.next) == sizeof(short) || sizeof(ha->list.next) == sizeof(int) || sizeof(ha->list.next) == sizeof(long)) || sizeof(ha->list.next) == sizeof(long long))) __compiletime_assert_541(); } while (0); (*(const volatile typeof( _Generic((ha->list.next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (ha->list.next))) *)&(ha->list.next)); }))), typeof(((typeof(*ha) *)0)->list)) || __builtin_types_compatible_p(typeof(*(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_541(void) ; if (!((sizeof(ha->list.next) == sizeof(char) || sizeof(ha->list.next) == sizeof(short) || sizeof(ha->list.next) == sizeof(int) || sizeof(ha->list.next) == sizeof(long)) || sizeof(ha->list.next) == sizeof(long long))) __compiletime_assert_541(); } while (0); (*(const volatile typeof( _Generic((ha->list.next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (ha->list.next))) *)&(ha->list.next)); }))), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*ha) *)(__mptr - __builtin_offsetof(typeof(*ha), list))); })) {
res = ether_addr_equal_64bits(addr, ha->addr);
if (res)
break;
}
rcu_read_unlock();
return res;
}
# 528 "./include/linux/etherdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long compare_ether_header(const void *a, const void *b)
{
# 545 "./include/linux/etherdevice.h"
u32 *a32 = (u32 *)((u8 *)a + 2);
u32 *b32 = (u32 *)((u8 *)b + 2);

return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
(a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);

}
# 563 "./include/linux/etherdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
unsigned int id)
{
u64 u = ether_addr_to_u64(base_addr);
u8 addr[6];

u += id;
u64_to_ether_addr(u, addr);
eth_hw_addr_set(dev, addr);
}
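/*
 * Gloss, not from the kernel tree: eth_hw_addr_gen() derives per-port
 * addresses by integer addition on the u64 form, so carries propagate
 * across octets -- a base ending in ..:00:ff with id 1 becomes
 * ..:01:00.
 */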
# 581 "./include/linux/etherdevice.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int eth_skb_pad(struct sk_buff *skb)
{
return skb_put_padto(skb, 60);
}
# 12 "./include/linux/if_vlan.h" 2


# 1 "./include/uapi/linux/if_vlan.h" 1
# 21 "./include/uapi/linux/if_vlan.h"
enum vlan_ioctl_cmds {
ADD_VLAN_CMD,
DEL_VLAN_CMD,
SET_VLAN_INGRESS_PRIORITY_CMD,
SET_VLAN_EGRESS_PRIORITY_CMD,
GET_VLAN_INGRESS_PRIORITY_CMD,
GET_VLAN_EGRESS_PRIORITY_CMD,
SET_VLAN_NAME_TYPE_CMD,
SET_VLAN_FLAG_CMD,
GET_VLAN_REALDEV_NAME_CMD,
GET_VLAN_VID_CMD
};

enum vlan_flags {
VLAN_FLAG_REORDER_HDR = 0x1,
VLAN_FLAG_GVRP = 0x2,
VLAN_FLAG_LOOSE_BINDING = 0x4,
VLAN_FLAG_MVRP = 0x8,
VLAN_FLAG_BRIDGE_BINDING = 0x10,
};

enum vlan_name_types {
VLAN_NAME_TYPE_PLUS_VID,
VLAN_NAME_TYPE_RAW_PLUS_VID,
VLAN_NAME_TYPE_PLUS_VID_NO_PAD,
VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD,
VLAN_NAME_TYPE_HIGHEST
};

struct vlan_ioctl_args {
int cmd;
char device1[24];

union {
char device2[24];
int VID;
unsigned int skb_priority;
unsigned int name_type;
unsigned int bind_type;
unsigned int flag;
} u;

short vlan_qos;
};
# 15 "./include/linux/if_vlan.h" 2
# 35 "./include/linux/if_vlan.h"
struct vlan_hdr {
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
};
# 48 "./include/linux/if_vlan.h"
struct vlan_ethhdr {
union { struct { unsigned char h_dest[6]; unsigned char h_source[6]; } ; struct { unsigned char h_dest[6]; unsigned char h_source[6]; } addrs; };



__be16 h_vlan_proto;
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
};



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
{
return (struct vlan_ethhdr *)skb_mac_header(skb);
}
# 72 "./include/linux/if_vlan.h"
extern void vlan_ioctl_set(int (*hook)(struct net *, void *));

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_vlan_dev(const struct net_device *dev)
{
return dev->priv_flags & IFF_802_1Q_VLAN;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int vlan_get_rx_ctag_filter_info(struct net_device *dev)
{
({ static bool __attribute__((__section__(".data.once"))) __already_done; bool __ret_do_once = !!(!rtnl_is_locked()); if (__builtin_expect(!!(__ret_do_once && !__already_done), 0)) { __already_done = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("RTNL: assertion failed at %s (%d)\n", "include/linux/if_vlan.h", 87); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/if_vlan.h"), "i" (87), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_do_once), 0); });
return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vlan_drop_rx_ctag_filter_info(struct net_device *dev)
{
({ static bool __attribute__((__section__(".data.once"))) __already_done; bool __ret_do_once = !!(!rtnl_is_locked()); if (__builtin_expect(!!(__ret_do_once && !__already_done), 0)) { __already_done = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("RTNL: assertion failed at %s (%d)\n", "include/linux/if_vlan.h", 93); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/if_vlan.h"), "i" (93), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_do_once), 0); });
call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int vlan_get_rx_stag_filter_info(struct net_device *dev)
{
({ static bool __attribute__((__section__(".data.once"))) __already_done; bool __ret_do_once = !!(!rtnl_is_locked()); if (__builtin_expect(!!(__ret_do_once && !__already_done), 0)) { __already_done = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("RTNL: assertion failed at %s (%d)\n", "include/linux/if_vlan.h", 99); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/if_vlan.h"), "i" (99), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_do_once), 0); });
return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vlan_drop_rx_stag_filter_info(struct net_device *dev)
{
({ static bool __attribute__((__section__(".data.once"))) __already_done; bool __ret_do_once = !!(!rtnl_is_locked()); if (__builtin_expect(!!(__ret_do_once && !__already_done), 0)) { __already_done = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("RTNL: assertion failed at %s (%d)\n", "include/linux/if_vlan.h", 105); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/if_vlan.h"), "i" (105), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_do_once), 0); });
call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev);
}
# 120 "./include/linux/if_vlan.h"
struct vlan_pcpu_stats {
u64 rx_packets;
u64 rx_bytes;
u64 rx_multicast;
u64 tx_packets;
u64 tx_bytes;
struct u64_stats_sync syncp;
u32 rx_errors;
u32 tx_dropped;
};
# 231 "./include/linux/if_vlan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
vlan_for_each(struct net_device *dev,
int (*action)(struct net_device *dev, int vid, void *arg),
void *arg)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/if_vlan.h"), "i" (248), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0);
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u16 vlan_dev_vlan_id(const struct net_device *dev)
{
do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/if_vlan.h"), "i" (254), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0);
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/if_vlan.h"), "i" (260), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0);
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
u32 skprio)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool vlan_do_receive(struct sk_buff **skb)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int vlan_vids_add_by_dev(struct net_device *dev,
const struct net_device *by_dev)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vlan_vids_del_by_dev(struct net_device *dev,
const struct net_device *by_dev)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool vlan_uses_dev(const struct net_device *dev)
{
return false;
}
# 307 "./include/linux/if_vlan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool eth_type_vlan(__be16 ethertype)
{
switch (ethertype) {
case (( __be16)(__builtin_constant_p((__u16)((0x8100))) ? ((__u16)( (((__u16)((0x8100)) & (__u16)0x00ffU) << 8) | (((__u16)((0x8100)) & (__u16)0xff00U) >> 8))) : __fswab16((0x8100)))):
case (( __be16)(__builtin_constant_p((__u16)((0x88A8))) ? ((__u16)( (((__u16)((0x88A8)) & (__u16)0x00ffU) << 8) | (((__u16)((0x88A8)) & (__u16)0xff00U) >> 8))) : __fswab16((0x88A8)))):
return true;
default:
return false;
}
}
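/*
 * The switch above is eth_type_vlan() with htons() expanded: for constant
 * ethertypes the __builtin_constant_p() arm lets the compiler fold the byte
 * swap at build time, with __fswab16() as the runtime fallback. Before
 * preprocessing the cases read roughly:
 *
 *	case htons(ETH_P_8021Q):	(0x8100)
 *	case htons(ETH_P_8021AD):	(0x88A8)
 */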

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool vlan_hw_offload_capable(netdev_features_t features,
__be16 proto)
{
if (proto == (( __be16)(__builtin_constant_p((__u16)((0x8100))) ? ((__u16)( (((__u16)((0x8100)) & (__u16)0x00ffU) << 8) | (((__u16)((0x8100)) & (__u16)0xff00U) >> 8))) : __fswab16((0x8100)))) && features & ((netdev_features_t)1 << (NETIF_F_HW_VLAN_CTAG_TX_BIT)))
return true;
if (proto == (( __be16)(__builtin_constant_p((__u16)((0x88A8))) ? ((__u16)( (((__u16)((0x88A8)) & (__u16)0x00ffU) << 8) | (((__u16)((0x88A8)) & (__u16)0xff00U) >> 8))) : __fswab16((0x88A8)))) && features & ((netdev_features_t)1 << (NETIF_F_HW_VLAN_STAG_TX_BIT)))
return true;
return false;
}
# 340 "./include/linux/if_vlan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __vlan_insert_inner_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci,
unsigned int mac_len)
{
struct vlan_ethhdr *veth;

if (skb_cow_head(skb, 4) < 0)
return -12;

skb_push(skb, 4);


if (__builtin_expect(!!(mac_len > 2), 1))
memmove(skb->data, skb->data + 4, mac_len - 2);
skb->mac_header -= 4;

veth = (struct vlan_ethhdr *)(skb->data + mac_len - 14);


if (__builtin_expect(!!(mac_len >= 2), 1)) {



veth->h_vlan_proto = vlan_proto;
} else {



veth->h_vlan_encapsulated_proto = skb->protocol;
}


veth->h_vlan_TCI = (( __be16)(__builtin_constant_p((__u16)((vlan_tci))) ? ((__u16)( (((__u16)((vlan_tci)) & (__u16)0x00ffU) << 8) | (((__u16)((vlan_tci)) & (__u16)0xff00U) >> 8))) : __fswab16((vlan_tci))));

return 0;
}
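/*
 * __vlan_insert_inner_tag() above is easier to read with the literals mapped
 * back to their macros: 4 is VLAN_HLEN, 14 is ETH_HLEN, 2 is ETH_TLEN and
 * -12 is -ENOMEM; __builtin_expect(..., 1) / __builtin_expect(..., 0) are
 * likely() / unlikely(). The final store is the expansion of
 *
 *	veth->h_vlan_TCI = htons(vlan_tci);
 *
 * where the __builtin_constant_p() ternary again allows compile-time
 * byte-swapping of constant TCIs.
 */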
# 388 "./include/linux/if_vlan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __vlan_insert_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
{
return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, 14);
}
# 409 "./include/linux/if_vlan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
__be16 vlan_proto,
u16 vlan_tci,
unsigned int mac_len)
{
int err;

err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len);
if (err) {
dev_kfree_skb_any(skb);
return ((void *)0);
}
return skb;
}
# 438 "./include/linux/if_vlan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
{
return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, 14);
}
# 456 "./include/linux/if_vlan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
__be16 vlan_proto,
u16 vlan_tci)
{
skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
if (skb)
skb->protocol = vlan_proto;
return skb;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
{
skb->vlan_present = 0;
}
# 484 "./include/linux/if_vlan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src)
{
dst->vlan_present = src->vlan_present;
dst->vlan_proto = src->vlan_proto;
dst->vlan_tci = src->vlan_tci;
}
# 500 "./include/linux/if_vlan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
{
skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
((skb)->vlan_tci));
if (__builtin_expect(!!(skb), 1))
__vlan_hwaccel_clear_tag(skb);
return skb;
}
# 517 "./include/linux/if_vlan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __vlan_hwaccel_put_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
{
skb->vlan_proto = vlan_proto;
skb->vlan_tci = vlan_tci;
skb->vlan_present = 1;
}
# 532 "./include/linux/if_vlan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;

if (!eth_type_vlan(veth->h_vlan_proto))
return -22;

*vlan_tci = (__builtin_constant_p((__u16)(( __u16)(__be16)(veth->h_vlan_TCI))) ? ((__u16)( (((__u16)(( __u16)(__be16)(veth->h_vlan_TCI)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(veth->h_vlan_TCI)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(veth->h_vlan_TCI)));
return 0;
}
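/*
 * In __vlan_get_tag() above, -22 is -EINVAL and the large ternary assigned
 * to *vlan_tci is the expansion of
 *
 *	*vlan_tci = ntohs(veth->h_vlan_TCI);
 */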
# 550 "./include/linux/if_vlan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
u16 *vlan_tci)
{
if (((skb)->vlan_present)) {
*vlan_tci = ((skb)->vlan_tci);
return 0;
} else {
*vlan_tci = 0;
return -22;
}
}
# 569 "./include/linux/if_vlan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
if (skb->dev->features & ((netdev_features_t)1 << (NETIF_F_HW_VLAN_CTAG_TX_BIT))) {
return __vlan_hwaccel_get_tag(skb, vlan_tci);
} else {
return __vlan_get_tag(skb, vlan_tci);
}
}
# 587 "./include/linux/if_vlan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
int *depth)
{
unsigned int vlan_depth = skb->mac_len, parse_depth = 8;





if (eth_type_vlan(type)) {
if (vlan_depth) {
if (({ int __ret_warn_on = !!(vlan_depth < 4); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/if_vlan.h"), "i" (598), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }))
return 0;
vlan_depth -= 4;
} else {
vlan_depth = 14;
}
do {
struct vlan_hdr vhdr, *vh;

vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
if (__builtin_expect(!!(!vh || !--parse_depth), 0))
return 0;

type = vh->h_vlan_encapsulated_proto;
vlan_depth += 4;
} while (eth_type_vlan(type));
}

if (depth)
*depth = vlan_depth;

return type;
}
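/*
 * __vlan_get_protocol() above: parse_depth starts at 8 (VLAN_MAX_DEPTH),
 * bounding how many nested VLAN headers are walked, and 4 is VLAN_HLEN.
 * The statement expression in the first branch is
 * WARN_ON(vlan_depth < VLAN_HLEN); returning 0 signals an unparseable
 * protocol to the caller.
 */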
# 629 "./include/linux/if_vlan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be16 vlan_get_protocol(const struct sk_buff *skb)
{
return __vlan_get_protocol(skb, skb->protocol, ((void *)0));
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan)
{
if (!skip_vlan)



return ((skb)->vlan_present) ? skb->vlan_proto : skb->protocol;

return vlan_get_protocol(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void vlan_set_encap_proto(struct sk_buff *skb,
struct vlan_hdr *vhdr)
{
__be16 proto;
unsigned short *rawp;






proto = vhdr->h_vlan_encapsulated_proto;
if (eth_proto_is_802_3(proto)) {
skb->protocol = proto;
return;
}

rawp = (unsigned short *)(vhdr + 1);
if (*rawp == 0xFFFF)







skb->protocol = (( __be16)(__builtin_constant_p((__u16)((0x0001))) ? ((__u16)( (((__u16)((0x0001)) & (__u16)0x00ffU) << 8) | (((__u16)((0x0001)) & (__u16)0xff00U) >> 8))) : __fswab16((0x0001))));
else



skb->protocol = (( __be16)(__builtin_constant_p((__u16)((0x0004))) ? ((__u16)( (((__u16)((0x0004)) & (__u16)0x00ffU) << 8) | (((__u16)((0x0004)) & (__u16)0xff00U) >> 8))) : __fswab16((0x0004))));
}
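/*
 * vlan_set_encap_proto() above, de-macroed: the two assignments are
 * htons(ETH_P_802_3) (0x0001) and htons(ETH_P_802_2) (0x0004). The 0xFFFF
 * test on the word following the VLAN header is the classic heuristic for a
 * raw 802.3 (Novell IPX) frame, as the original header comments explain.
 */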
# 689 "./include/linux/if_vlan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_vlan_tagged(const struct sk_buff *skb)
{
if (!((skb)->vlan_present) &&
__builtin_expect(!!(!eth_type_vlan(skb->protocol)), 1))
return false;

return true;
}
# 705 "./include/linux/if_vlan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_vlan_tagged_multi(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;

if (!((skb)->vlan_present)) {
struct vlan_ethhdr *veh;

if (__builtin_expect(!!(!eth_type_vlan(protocol)), 1))
return false;

if (__builtin_expect(!!(!pskb_may_pull(skb, 18)), 0))
return false;

veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
}

if (!eth_type_vlan(protocol))
return false;

return true;
}
# 735 "./include/linux/if_vlan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) netdev_features_t vlan_features_check(struct sk_buff *skb,
netdev_features_t features)
{
if (skb_vlan_tagged_multi(skb)) {





features &= ((netdev_features_t)1 << (NETIF_F_SG_BIT)) | ((netdev_features_t)1 << (NETIF_F_HIGHDMA_BIT)) | ((netdev_features_t)1 << (NETIF_F_HW_CSUM_BIT)) |
((netdev_features_t)1 << (NETIF_F_FRAGLIST_BIT)) | ((netdev_features_t)1 << (NETIF_F_HW_VLAN_CTAG_TX_BIT)) |
((netdev_features_t)1 << (NETIF_F_HW_VLAN_STAG_TX_BIT));
}

return features;
}
# 761 "./include/linux/if_vlan.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long compare_vlan_header(const struct vlan_hdr *h1,
const struct vlan_hdr *h2)
{



return (( u32)h1->h_vlan_TCI ^ ( u32)h2->h_vlan_TCI) |
(( u32)h1->h_vlan_encapsulated_proto ^
( u32)h2->h_vlan_encapsulated_proto);

}
# 21 "./include/linux/filter.h" 2


# 1 "./include/crypto/sha1.h" 1
# 20 "./include/crypto/sha1.h"
extern const u8 sha1_zero_message_hash[20];

struct sha1_state {
u32 state[20 / 4];
u64 count;
u8 buffer[64];
};

struct shash_desc;

extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len);

extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *hash);
# 43 "./include/crypto/sha1.h"
void sha1_init(__u32 *buf);
void sha1_transform(__u32 *digest, const char *data, __u32 *W);
# 24 "./include/linux/filter.h" 2


# 1 "./include/net/sch_generic.h" 1
# 19 "./include/net/sch_generic.h"
# 1 "./include/net/gen_stats.h" 1




# 1 "./include/uapi/linux/gen_stats.h" 1






enum {
TCA_STATS_UNSPEC,
TCA_STATS_BASIC,
TCA_STATS_RATE_EST,
TCA_STATS_QUEUE,
TCA_STATS_APP,
TCA_STATS_RATE_EST64,
TCA_STATS_PAD,
TCA_STATS_BASIC_HW,
TCA_STATS_PKT64,
__TCA_STATS_MAX,
};







struct gnet_stats_basic {
__u64 bytes;
__u32 packets;
};






struct gnet_stats_rate_est {
__u32 bps;
__u32 pps;
};






struct gnet_stats_rate_est64 {
__u64 bps;
__u64 pps;
};
# 59 "./include/uapi/linux/gen_stats.h"
struct gnet_stats_queue {
__u32 qlen;
__u32 backlog;
__u32 drops;
__u32 requeues;
__u32 overlimits;
};






struct gnet_estimator {
signed char interval;
unsigned char ewma_log;
};
# 6 "./include/net/gen_stats.h" 2
# 18 "./include/net/gen_stats.h"
struct gnet_stats_basic_sync {
u64_stats_t bytes;
u64_stats_t packets;
struct u64_stats_sync syncp;
} __attribute__((__aligned__(2 * sizeof(u64))));

struct net_rate_estimator;

struct gnet_dump {
spinlock_t * lock;
struct sk_buff * skb;
struct nlattr * tail;


int compat_tc_stats;
int compat_xstats;
int padattr;
void * xstats;
int xstats_len;
struct tc_stats tc_stats;
};

void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
struct gnet_dump *d, int padattr);

int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
int tc_stats_type, int xstats_type,
spinlock_t *lock, struct gnet_dump *d,
int padattr);

int gnet_stats_copy_basic(struct gnet_dump *d,
struct gnet_stats_basic_sync *cpu,
struct gnet_stats_basic_sync *b, bool running);
void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
struct gnet_stats_basic_sync *cpu,
struct gnet_stats_basic_sync *b, bool running);
int gnet_stats_copy_basic_hw(struct gnet_dump *d,
struct gnet_stats_basic_sync *cpu,
struct gnet_stats_basic_sync *b, bool running);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
struct net_rate_estimator **ptr);
int gnet_stats_copy_queue(struct gnet_dump *d,
struct gnet_stats_queue *cpu_q,
struct gnet_stats_queue *q, __u32 qlen);
void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
const struct gnet_stats_queue *cpu_q,
const struct gnet_stats_queue *q);
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);

int gnet_stats_finish_copy(struct gnet_dump *d);

int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
struct gnet_stats_basic_sync *cpu_bstats,
struct net_rate_estimator **rate_est,
spinlock_t *lock,
bool running, struct nlattr *opt);
void gen_kill_estimator(struct net_rate_estimator **ptr);
int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
struct gnet_stats_basic_sync *cpu_bstats,
struct net_rate_estimator **ptr,
spinlock_t *lock,
bool running, struct nlattr *opt);
bool gen_estimator_active(struct net_rate_estimator **ptr);
bool gen_estimator_read(struct net_rate_estimator **ptr,
struct gnet_stats_rate_est64 *sample);
# 20 "./include/net/sch_generic.h" 2

# 1 "./include/net/flow_offload.h" 1








struct flow_match {
struct flow_dissector *dissector;
void *mask;
void *key;
};

struct flow_match_meta {
struct flow_dissector_key_meta *key, *mask;
};

struct flow_match_basic {
struct flow_dissector_key_basic *key, *mask;
};

struct flow_match_control {
struct flow_dissector_key_control *key, *mask;
};

struct flow_match_eth_addrs {
struct flow_dissector_key_eth_addrs *key, *mask;
};

struct flow_match_vlan {
struct flow_dissector_key_vlan *key, *mask;
};

struct flow_match_ipv4_addrs {
struct flow_dissector_key_ipv4_addrs *key, *mask;
};

struct flow_match_ipv6_addrs {
struct flow_dissector_key_ipv6_addrs *key, *mask;
};

struct flow_match_ip {
struct flow_dissector_key_ip *key, *mask;
};

struct flow_match_ports {
struct flow_dissector_key_ports *key, *mask;
};

struct flow_match_icmp {
struct flow_dissector_key_icmp *key, *mask;
};

struct flow_match_tcp {
struct flow_dissector_key_tcp *key, *mask;
};

struct flow_match_mpls {
struct flow_dissector_key_mpls *key, *mask;
};

struct flow_match_enc_keyid {
struct flow_dissector_key_keyid *key, *mask;
};

struct flow_match_enc_opts {
struct flow_dissector_key_enc_opts *key, *mask;
};

struct flow_match_ct {
struct flow_dissector_key_ct *key, *mask;
};

struct flow_rule;

void flow_rule_match_meta(const struct flow_rule *rule,
struct flow_match_meta *out);
void flow_rule_match_basic(const struct flow_rule *rule,
struct flow_match_basic *out);
void flow_rule_match_control(const struct flow_rule *rule,
struct flow_match_control *out);
void flow_rule_match_eth_addrs(const struct flow_rule *rule,
struct flow_match_eth_addrs *out);
void flow_rule_match_vlan(const struct flow_rule *rule,
struct flow_match_vlan *out);
void flow_rule_match_cvlan(const struct flow_rule *rule,
struct flow_match_vlan *out);
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
struct flow_match_ipv4_addrs *out);
void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
struct flow_match_ipv6_addrs *out);
void flow_rule_match_ip(const struct flow_rule *rule,
struct flow_match_ip *out);
void flow_rule_match_ports(const struct flow_rule *rule,
struct flow_match_ports *out);
void flow_rule_match_tcp(const struct flow_rule *rule,
struct flow_match_tcp *out);
void flow_rule_match_icmp(const struct flow_rule *rule,
struct flow_match_icmp *out);
void flow_rule_match_mpls(const struct flow_rule *rule,
struct flow_match_mpls *out);
void flow_rule_match_enc_control(const struct flow_rule *rule,
struct flow_match_control *out);
void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
struct flow_match_ipv4_addrs *out);
void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
struct flow_match_ipv6_addrs *out);
void flow_rule_match_enc_ip(const struct flow_rule *rule,
struct flow_match_ip *out);
void flow_rule_match_enc_ports(const struct flow_rule *rule,
struct flow_match_ports *out);
void flow_rule_match_enc_keyid(const struct flow_rule *rule,
struct flow_match_enc_keyid *out);
void flow_rule_match_enc_opts(const struct flow_rule *rule,
struct flow_match_enc_opts *out);
void flow_rule_match_ct(const struct flow_rule *rule,
struct flow_match_ct *out);

enum flow_action_id {
FLOW_ACTION_ACCEPT = 0,
FLOW_ACTION_DROP,
FLOW_ACTION_TRAP,
FLOW_ACTION_GOTO,
FLOW_ACTION_REDIRECT,
FLOW_ACTION_MIRRED,
FLOW_ACTION_REDIRECT_INGRESS,
FLOW_ACTION_MIRRED_INGRESS,
FLOW_ACTION_VLAN_PUSH,
FLOW_ACTION_VLAN_POP,
FLOW_ACTION_VLAN_MANGLE,
FLOW_ACTION_TUNNEL_ENCAP,
FLOW_ACTION_TUNNEL_DECAP,
FLOW_ACTION_MANGLE,
FLOW_ACTION_ADD,
FLOW_ACTION_CSUM,
FLOW_ACTION_MARK,
FLOW_ACTION_PTYPE,
FLOW_ACTION_PRIORITY,
FLOW_ACTION_WAKE,
FLOW_ACTION_QUEUE,
FLOW_ACTION_SAMPLE,
FLOW_ACTION_POLICE,
FLOW_ACTION_CT,
FLOW_ACTION_CT_METADATA,
FLOW_ACTION_MPLS_PUSH,
FLOW_ACTION_MPLS_POP,
FLOW_ACTION_MPLS_MANGLE,
FLOW_ACTION_GATE,
FLOW_ACTION_PPPOE_PUSH,
FLOW_ACTION_JUMP,
FLOW_ACTION_PIPE,
FLOW_ACTION_VLAN_PUSH_ETH,
FLOW_ACTION_VLAN_POP_ETH,
NUM_FLOW_ACTIONS,
};





enum flow_action_mangle_base {
FLOW_ACT_MANGLE_UNSPEC = 0,
FLOW_ACT_MANGLE_HDR_TYPE_ETH,
FLOW_ACT_MANGLE_HDR_TYPE_IP4,
FLOW_ACT_MANGLE_HDR_TYPE_IP6,
FLOW_ACT_MANGLE_HDR_TYPE_TCP,
FLOW_ACT_MANGLE_HDR_TYPE_UDP,
};

enum flow_action_hw_stats_bit {
FLOW_ACTION_HW_STATS_IMMEDIATE_BIT,
FLOW_ACTION_HW_STATS_DELAYED_BIT,
FLOW_ACTION_HW_STATS_DISABLED_BIT,

FLOW_ACTION_HW_STATS_NUM_BITS
};

enum flow_action_hw_stats {
FLOW_ACTION_HW_STATS_IMMEDIATE =
((((1UL))) << (FLOW_ACTION_HW_STATS_IMMEDIATE_BIT)),
FLOW_ACTION_HW_STATS_DELAYED = ((((1UL))) << (FLOW_ACTION_HW_STATS_DELAYED_BIT)),
FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE |
FLOW_ACTION_HW_STATS_DELAYED,
FLOW_ACTION_HW_STATS_DISABLED =
((((1UL))) << (FLOW_ACTION_HW_STATS_DISABLED_BIT)),
FLOW_ACTION_HW_STATS_DONT_CARE = ((((1UL))) << (FLOW_ACTION_HW_STATS_NUM_BITS)) - 1,
};

typedef void (*action_destr)(void *priv);

struct flow_action_cookie {
u32 cookie_len;
u8 cookie[];
};

struct flow_action_cookie *flow_action_cookie_create(void *data,
unsigned int len,
gfp_t gfp);
void flow_action_cookie_destroy(struct flow_action_cookie *cookie);

struct flow_action_entry {
enum flow_action_id id;
u32 hw_index;
enum flow_action_hw_stats hw_stats;
action_destr destructor;
void *destructor_priv;
union {
u32 chain_index;
struct net_device *dev;
struct {
u16 vid;
__be16 proto;
u8 prio;
} vlan;
struct {
unsigned char dst[6];
unsigned char src[6];
} vlan_push_eth;
struct {

enum flow_action_mangle_base htype;
u32 offset;
u32 mask;
u32 val;
} mangle;
struct ip_tunnel_info *tunnel;
u32 csum_flags;
u32 mark;
u16 ptype;
u32 priority;
struct {
u32 ctx;
u32 index;
u8 vf;
} queue;
struct {
struct psample_group *psample_group;
u32 rate;
u32 trunc_size;
bool truncate;
} sample;
struct {
u32 burst;
u64 rate_bytes_ps;
u64 peakrate_bytes_ps;
u32 avrate;
u16 overhead;
u64 burst_pkt;
u64 rate_pkt_ps;
u32 mtu;
struct {
enum flow_action_id act_id;
u32 extval;
} exceed, notexceed;
} police;
struct {
int action;
u16 zone;
struct nf_flowtable *flow_table;
} ct;
struct {
unsigned long cookie;
u32 mark;
u32 labels[4];
bool orig_dir;
} ct_metadata;
struct {
u32 label;
__be16 proto;
u8 tc;
u8 bos;
u8 ttl;
} mpls_push;
struct {
__be16 proto;
} mpls_pop;
struct {
u32 label;
u8 tc;
u8 bos;
u8 ttl;
} mpls_mangle;
struct {
s32 prio;
u64 basetime;
u64 cycletime;
u64 cycletimeext;
u32 num_entries;
struct action_gate_entry *entries;
} gate;
struct {
u16 sid;
} pppoe;
};
struct flow_action_cookie *cookie;
};

struct flow_action {
unsigned int num_entries;
struct flow_action_entry entries[];
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool flow_action_has_entries(const struct flow_action *action)
{
return action->num_entries;
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool flow_offload_has_one_action(const struct flow_action *action)
{
return action->num_entries == 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool flow_action_is_last_entry(const struct flow_action *action,
const struct flow_action_entry *entry)
{
return entry == &action->entries[action->num_entries - 1];
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
flow_action_mixed_hw_stats_check(const struct flow_action *action,
struct netlink_ext_ack *extack)
{
const struct flow_action_entry *action_entry;
u8 last_hw_stats;
int i;

if (flow_offload_has_one_action(action))
return true;

for (i = 0, action_entry = &(action)->entries[0]; i < (action)->num_entries; action_entry = &(action)->entries[++i]) {
if (i && action_entry->hw_stats != last_hw_stats) {
do { static const char __msg[] = "ipv6" ": " "Mixing HW stats types for actions is not supported"; struct netlink_ext_ack *__extack = ((extack)); do_trace_netlink_extack(__msg); if (__extack) __extack->_msg = __msg; } while (0);
return false;
}
last_hw_stats = action_entry->hw_stats;
}
return true;
}
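/*
 * The do { static const char __msg[] = "ipv6" ": " ... } while (0) blocks in
 * this header are NL_SET_ERR_MSG_MOD(extack, "...") expanded with
 * KBUILD_MODNAME "ipv6" (this translation unit is net/ipv6/route.c); they
 * attach the message to the netlink extended ack if one was supplied.
 */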

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const struct flow_action_entry *
flow_action_first_entry_get(const struct flow_action *action)
{
({ int __ret_warn_on = !!(!flow_action_has_entries(action)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/flow_offload.h"), "i" (355), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
return &action->entries[0];
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
__flow_action_hw_stats_check(const struct flow_action *action,
struct netlink_ext_ack *extack,
bool check_allow_bit,
enum flow_action_hw_stats_bit allow_bit)
{
const struct flow_action_entry *action_entry;

if (!flow_action_has_entries(action))
return true;
if (!flow_action_mixed_hw_stats_check(action, extack))
return false;

action_entry = flow_action_first_entry_get(action);


({ int __ret_warn_on = !!(!action_entry->hw_stats); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/flow_offload.h"), "i" (375), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });

if (!check_allow_bit &&
~action_entry->hw_stats & FLOW_ACTION_HW_STATS_ANY) {
do { static const char __msg[] = "ipv6" ": " "Driver supports only default HW stats type \"any\""; struct netlink_ext_ack *__extack = ((extack)); do_trace_netlink_extack(__msg); if (__extack) __extack->_msg = __msg; } while (0);
return false;
} else if (check_allow_bit &&
!(action_entry->hw_stats & ((((1UL))) << (allow_bit)))) {
do { static const char __msg[] = "ipv6" ": " "Driver does not support selected HW stats type"; struct netlink_ext_ack *__extack = ((extack)); do_trace_netlink_extack(__msg); if (__extack) __extack->_msg = __msg; } while (0);
return false;
}
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
flow_action_hw_stats_check(const struct flow_action *action,
struct netlink_ext_ack *extack,
enum flow_action_hw_stats_bit allow_bit)
{
return __flow_action_hw_stats_check(action, extack, true, allow_bit);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
flow_action_basic_hw_stats_check(const struct flow_action *action,
struct netlink_ext_ack *extack)
{
return __flow_action_hw_stats_check(action, extack, false, 0);
}

struct flow_rule {
struct flow_match match;
struct flow_action action;
};

struct flow_rule *flow_rule_alloc(unsigned int num_actions);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool flow_rule_match_key(const struct flow_rule *rule,
enum flow_dissector_key_id key)
{
return dissector_uses_key(rule->match.dissector, key);
}

struct flow_stats {
u64 pkts;
u64 bytes;
u64 drops;
u64 lastused;
enum flow_action_hw_stats used_hw_stats;
bool used_hw_stats_valid;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flow_stats_update(struct flow_stats *flow_stats,
u64 bytes, u64 pkts,
u64 drops, u64 lastused,
enum flow_action_hw_stats used_hw_stats)
{
flow_stats->pkts += pkts;
flow_stats->bytes += bytes;
flow_stats->drops += drops;
flow_stats->lastused = __builtin_choose_expr(((!!(sizeof((typeof((u64)(flow_stats->lastused)) *)1 == (typeof((u64)(lastused)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((u64)(flow_stats->lastused)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((u64)(lastused)) * 0l)) : (int *)8))))), (((u64)(flow_stats->lastused)) > ((u64)(lastused)) ? ((u64)(flow_stats->lastused)) : ((u64)(lastused))), ({ typeof((u64)(flow_stats->lastused)) __UNIQUE_ID___x542 = ((u64)(flow_stats->lastused)); typeof((u64)(lastused)) __UNIQUE_ID___y543 = ((u64)(lastused)); ((__UNIQUE_ID___x542) > (__UNIQUE_ID___y543) ? (__UNIQUE_ID___x542) : (__UNIQUE_ID___y543)); }));




({ int __ret_warn_on = !!(used_hw_stats == FLOW_ACTION_HW_STATS_ANY); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/flow_offload.h"), "i" (439), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
flow_stats->used_hw_stats |= used_hw_stats;
flow_stats->used_hw_stats_valid = true;
}
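/*
 * The __builtin_choose_expr() expression assigned to flow_stats->lastused
 * above appears to be max_t(u64, flow_stats->lastused, lastused) expanded
 * through the kernel's __careful_cmp machinery: a constant-expression fast
 * path that folds to a plain ternary, otherwise __UNIQUE_ID temporaries so
 * each operand is evaluated exactly once.
 */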

enum flow_block_command {
FLOW_BLOCK_BIND,
FLOW_BLOCK_UNBIND,
};

enum flow_block_binder_type {
FLOW_BLOCK_BINDER_TYPE_UNSPEC,
FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
FLOW_BLOCK_BINDER_TYPE_RED_MARK,
};

struct flow_block {
struct list_head cb_list;
};

struct netlink_ext_ack;

struct flow_block_offload {
enum flow_block_command command;
enum flow_block_binder_type binder_type;
bool block_shared;
bool unlocked_driver_cb;
struct net *net;
struct flow_block *block;
struct list_head cb_list;
struct list_head *driver_block_list;
struct netlink_ext_ack *extack;
struct Qdisc *sch;
struct list_head *cb_list_head;
};

enum tc_setup_type;
typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
void *cb_priv);

struct flow_block_cb;

struct flow_block_indr {
struct list_head list;
struct net_device *dev;
struct Qdisc *sch;
enum flow_block_binder_type binder_type;
void *data;
void *cb_priv;
void (*cleanup)(struct flow_block_cb *block_cb);
};

struct flow_block_cb {
struct list_head driver_list;
struct list_head list;
flow_setup_cb_t *cb;
void *cb_ident;
void *cb_priv;
void (*release)(void *cb_priv);
struct flow_block_indr indr;
unsigned int refcnt;
};

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
void *cb_ident, void *cb_priv,
void (*release)(void *cb_priv));
struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
void *cb_ident, void *cb_priv,
void (*release)(void *cb_priv),
struct flow_block_offload *bo,
struct net_device *dev,
struct Qdisc *sch, void *data,
void *indr_cb_priv,
void (*cleanup)(struct flow_block_cb *block_cb));
void flow_block_cb_free(struct flow_block_cb *block_cb);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
flow_setup_cb_t *cb, void *cb_ident);

void *flow_block_cb_priv(struct flow_block_cb *block_cb);
void flow_block_cb_incref(struct flow_block_cb *block_cb);
unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flow_block_cb_add(struct flow_block_cb *block_cb,
struct flow_block_offload *offload)
{
list_add_tail(&block_cb->list, &offload->cb_list);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flow_block_cb_remove(struct flow_block_cb *block_cb,
struct flow_block_offload *offload)
{
list_move(&block_cb->list, &offload->cb_list);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flow_indr_block_cb_remove(struct flow_block_cb *block_cb,
struct flow_block_offload *offload)
{
list_del(&block_cb->indr.list);
list_move(&block_cb->list, &offload->cb_list);
}

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
struct list_head *driver_block_list);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
struct list_head *driver_list,
flow_setup_cb_t *cb,
void *cb_ident, void *cb_priv, bool ingress_only);

enum flow_cls_command {
FLOW_CLS_REPLACE,
FLOW_CLS_DESTROY,
FLOW_CLS_STATS,
FLOW_CLS_TMPLT_CREATE,
FLOW_CLS_TMPLT_DESTROY,
};

struct flow_cls_common_offload {
u32 chain_index;
__be16 protocol;
u32 prio;
struct netlink_ext_ack *extack;
};

struct flow_cls_offload {
struct flow_cls_common_offload common;
enum flow_cls_command command;
unsigned long cookie;
struct flow_rule *rule;
struct flow_stats stats;
u32 classid;
};

enum offload_act_command {
FLOW_ACT_REPLACE,
FLOW_ACT_DESTROY,
FLOW_ACT_STATS,
};

struct flow_offload_action {
struct netlink_ext_ack *extack;
enum offload_act_command command;
enum flow_action_id id;
u32 index;
struct flow_stats stats;
struct flow_action action;
};

struct flow_offload_action *offload_action_alloc(unsigned int num_actions);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct flow_rule *
flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
{
return flow_cmd->rule;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void flow_block_init(struct flow_block *flow_block)
{
INIT_LIST_HEAD(&flow_block->cb_list);
}

typedef int flow_indr_block_bind_cb_t(struct net_device *dev, struct Qdisc *sch, void *cb_priv,
enum tc_setup_type type, void *type_data,
void *data,
void (*cleanup)(struct flow_block_cb *block_cb));

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
void (*release)(void *cb_priv));
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
enum tc_setup_type type, void *data,
struct flow_block_offload *bo,
void (*cleanup)(struct flow_block_cb *block_cb));
# 22 "./include/net/sch_generic.h" 2

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;

struct qdisc_rate_table {
struct tc_ratespec rate;
u32 data[256];
struct qdisc_rate_table *next;
int refcnt;
};

enum qdisc_state_t {
__QDISC_STATE_SCHED,
__QDISC_STATE_DEACTIVATED,
__QDISC_STATE_MISSED,
__QDISC_STATE_DRAINING,
};

enum qdisc_state2_t {



__QDISC_STATE2_RUNNING,
};







struct qdisc_size_table {
struct callback_head rcu;
struct list_head list;
struct tc_sizespec szopts;
int refcnt;
u16 data[];
};


struct qdisc_skb_head {
struct sk_buff *head;
struct sk_buff *tail;
__u32 qlen;
spinlock_t lock;
};

struct Qdisc {
int (*enqueue)(struct sk_buff *skb,
struct Qdisc *sch,
struct sk_buff **to_free);
struct sk_buff * (*dequeue)(struct Qdisc *sch);
unsigned int flags;
# 97 "./include/net/sch_generic.h"
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table *stab;
struct hlist_node hash;
u32 handle;
u32 parent;

struct netdev_queue *dev_queue;

struct net_rate_estimator *rate_est;
struct gnet_stats_basic_sync *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
int pad;
refcount_t refcnt;




struct sk_buff_head gso_skb __attribute__((__aligned__((1 << 6))));
struct qdisc_skb_head q;
struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
unsigned long state;
unsigned long state2;
struct Qdisc *next_sched;
struct sk_buff_head skb_bad_txq;

spinlock_t busylock __attribute__((__aligned__((1 << 6))));
spinlock_t seqlock;

struct callback_head rcu;
netdevice_tracker dev_tracker;

long privdata[] __attribute__((__aligned__((1 << 6))));
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_refcount_inc(struct Qdisc *qdisc)
{
if (qdisc->flags & 1)
return;
refcount_inc(&qdisc->refcnt);
}
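/*
 * The bare flag literals tested against qdisc->flags in this header map to
 * the TCQ_F_* constants: 1 TCQ_F_BUILTIN (above), 8 TCQ_F_MQROOT,
 * 0x10 TCQ_F_ONETXQUEUE, 0x20 TCQ_F_CPUSTATS, 0x100 TCQ_F_NOLOCK and
 * 0x200 TCQ_F_OFFLOADED.
 */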





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
if (qdisc->flags & 1)
return qdisc;
if (refcount_inc_not_zero(&qdisc->refcnt))
return qdisc;
return ((void *)0);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool qdisc_is_running(struct Qdisc *qdisc)
{
if (qdisc->flags & 0x100)
return spin_is_locked(&qdisc->seqlock);
return arch_test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
{
return !(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_544(void) ; if (!((sizeof(qdisc->state) == sizeof(char) || sizeof(qdisc->state) == sizeof(short) || sizeof(qdisc->state) == sizeof(int) || sizeof(qdisc->state) == sizeof(long)) || sizeof(qdisc->state) == sizeof(long long))) __compiletime_assert_544(); } while (0); (*(const volatile typeof( _Generic((qdisc->state), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (qdisc->state))) *)&(qdisc->state)); }) & (((((1UL))) << (__QDISC_STATE_MISSED)) | ((((1UL))) << (__QDISC_STATE_DRAINING))));
}
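/*
 * nolock_qdisc_is_empty() above is, pre-expansion, roughly
 *
 *	return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
 *
 * i.e. BIT(__QDISC_STATE_MISSED) | BIT(__QDISC_STATE_DRAINING). The
 * __compiletime_assert_544() stanza is READ_ONCE()'s size check and the
 * _Generic() volatile access is the actual once-only load.
 */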

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
return q->flags & 0x20;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool qdisc_is_empty(const struct Qdisc *qdisc)
{
if (qdisc_is_percpu_stats(qdisc))
return nolock_qdisc_is_empty(qdisc);
return !({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_545(void) ; if (!((sizeof(qdisc->q.qlen) == sizeof(char) || sizeof(qdisc->q.qlen) == sizeof(short) || sizeof(qdisc->q.qlen) == sizeof(int) || sizeof(qdisc->q.qlen) == sizeof(long)) || sizeof(qdisc->q.qlen) == sizeof(long long))) __compiletime_assert_545(); } while (0); (*(const volatile typeof( _Generic((qdisc->q.qlen), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (qdisc->q.qlen))) *)&(qdisc->q.qlen)); });
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool qdisc_run_begin(struct Qdisc *qdisc)
{
if (qdisc->flags & 0x100) {
if (spin_trylock(&qdisc->seqlock))
return true;





do { do { } while (0); __asm__ __volatile__ ("fence " "rw" "," "rw" : : : "memory"); } while (0);






if (arch_test_bit(__QDISC_STATE_MISSED, &qdisc->state))
return false;
# 211 "./include/net/sch_generic.h"
set_bit(__QDISC_STATE_MISSED, &qdisc->state);





do { do { } while (0); __asm__ __volatile__ ("fence " "rw" "," "rw" : : : "memory"); } while (0);




return spin_trylock(&qdisc->seqlock);
}
return !arch___test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}
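/*
 * In qdisc_run_begin() above, the two "fence rw,rw" asm blocks are
 * smp_mb__before_atomic() / smp_mb__after_atomic(), which are full fences on
 * riscv: they order the seqlock trylock, the __QDISC_STATE_MISSED test and
 * the set_bit() against a concurrent dequeue clearing MISSED. For
 * non-TCQ_F_NOLOCK qdiscs the whole function reduces to
 * __test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2).
 */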

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_run_end(struct Qdisc *qdisc)
{
if (qdisc->flags & 0x100) {
spin_unlock(&qdisc->seqlock);

if (__builtin_expect(!!(arch_test_bit(__QDISC_STATE_MISSED, &qdisc->state)), 0))

__netif_schedule(qdisc);
} else {
arch___clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
return qdisc->flags & 0x10;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{


return dql_avail(&txq->dql);



}

struct Qdisc_class_ops {
unsigned int flags;

struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
int (*graft)(struct Qdisc *, unsigned long cl,
struct Qdisc *, struct Qdisc **,
struct netlink_ext_ack *extack);
struct Qdisc * (*leaf)(struct Qdisc *, unsigned long cl);
void (*qlen_notify)(struct Qdisc *, unsigned long);


unsigned long (*find)(struct Qdisc *, u32 classid);
int (*change)(struct Qdisc *, u32, u32,
struct nlattr **, unsigned long *,
struct netlink_ext_ack *);
int (*delete)(struct Qdisc *, unsigned long,
struct netlink_ext_ack *);
void (*walk)(struct Qdisc *, struct qdisc_walker * arg);


struct tcf_block * (*tcf_block)(struct Qdisc *sch,
unsigned long arg,
struct netlink_ext_ack *extack);
unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
u32 classid);
void (*unbind_tcf)(struct Qdisc *, unsigned long);


int (*dump)(struct Qdisc *, unsigned long,
struct sk_buff *skb, struct tcmsg*);
int (*dump_stats)(struct Qdisc *, unsigned long,
struct gnet_dump *);
};




enum qdisc_class_ops_flags {
QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};

struct Qdisc_ops {
struct Qdisc_ops *next;
const struct Qdisc_class_ops *cl_ops;
char id[16];
int priv_size;
unsigned int static_flags;

int (*enqueue)(struct sk_buff *skb,
struct Qdisc *sch,
struct sk_buff **to_free);
struct sk_buff * (*dequeue)(struct Qdisc *);
struct sk_buff * (*peek)(struct Qdisc *);

int (*init)(struct Qdisc *sch, struct nlattr *arg,
struct netlink_ext_ack *extack);
void (*reset)(struct Qdisc *);
void (*destroy)(struct Qdisc *);
int (*change)(struct Qdisc *sch,
struct nlattr *arg,
struct netlink_ext_ack *extack);
void (*attach)(struct Qdisc *sch);
int (*change_tx_queue_len)(struct Qdisc *, unsigned int);
void (*change_real_num_tx)(struct Qdisc *sch,
unsigned int new_real_tx);

int (*dump)(struct Qdisc *, struct sk_buff *);
int (*dump_stats)(struct Qdisc *, struct gnet_dump *);

void (*ingress_block_set)(struct Qdisc *sch,
u32 block_index);
void (*egress_block_set)(struct Qdisc *sch,
u32 block_index);
u32 (*ingress_block_get)(struct Qdisc *sch);
u32 (*egress_block_get)(struct Qdisc *sch);

struct module *owner;
};


struct tcf_result {
union {
struct {
unsigned long class;
u32 classid;
};
const struct tcf_proto *goto_tp;


struct {
bool ingress;
struct gnet_stats_queue *qstats;
};
};
};

struct tcf_chain;

struct tcf_proto_ops {
struct list_head head;
char kind[16];

int (*classify)(struct sk_buff *,
const struct tcf_proto *,
struct tcf_result *);
int (*init)(struct tcf_proto*);
void (*destroy)(struct tcf_proto *tp, bool rtnl_held,
struct netlink_ext_ack *extack);

void* (*get)(struct tcf_proto*, u32 handle);
void (*put)(struct tcf_proto *tp, void *f);
int (*change)(struct net *net, struct sk_buff *,
struct tcf_proto*, unsigned long,
u32 handle, struct nlattr **,
void **, u32,
struct netlink_ext_ack *);
int (*delete)(struct tcf_proto *tp, void *arg,
bool *last, bool rtnl_held,
struct netlink_ext_ack *);
bool (*delete_empty)(struct tcf_proto *tp);
void (*walk)(struct tcf_proto *tp,
struct tcf_walker *arg, bool rtnl_held);
int (*reoffload)(struct tcf_proto *tp, bool add,
flow_setup_cb_t *cb, void *cb_priv,
struct netlink_ext_ack *extack);
void (*hw_add)(struct tcf_proto *tp,
void *type_data);
void (*hw_del)(struct tcf_proto *tp,
void *type_data);
void (*bind_class)(void *, u32, unsigned long,
void *, unsigned long);
void * (*tmplt_create)(struct net *net,
struct tcf_chain *chain,
struct nlattr **tca,
struct netlink_ext_ack *extack);
void (*tmplt_destroy)(void *tmplt_priv);


int (*dump)(struct net*, struct tcf_proto*, void *,
struct sk_buff *skb, struct tcmsg*,
bool);
int (*terse_dump)(struct net *net,
struct tcf_proto *tp, void *fh,
struct sk_buff *skb,
struct tcmsg *t, bool rtnl_held);
int (*tmplt_dump)(struct sk_buff *skb,
struct net *net,
void *tmplt_priv);

struct module *owner;
int flags;
};





enum tcf_proto_ops_flags {
TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};

struct tcf_proto {

struct tcf_proto *next;
void *root;


int (*classify)(struct sk_buff *,
const struct tcf_proto *,
struct tcf_result *);
__be16 protocol;


u32 prio;
void *data;
const struct tcf_proto_ops *ops;
struct tcf_chain *chain;



spinlock_t lock;
bool deleting;
refcount_t refcnt;
struct callback_head rcu;
struct hlist_node destroy_ht_node;
};

struct qdisc_skb_cb {
struct {
unsigned int pkt_len;
u16 slave_dev_queue_mapping;
u16 tc_classid;
};

unsigned char data[20];
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {

struct mutex filter_chain_lock;
struct tcf_proto *filter_chain;
struct list_head list;
struct tcf_block *block;
u32 index;
unsigned int refcnt;
unsigned int action_refcnt;
bool explicitly_created;
bool flushing;
const struct tcf_proto_ops *tmplt_ops;
void *tmplt_priv;
struct callback_head rcu;
};

struct tcf_block {



struct mutex lock;
struct list_head chain_list;
u32 index;
u32 classid;
refcount_t refcnt;
struct net *net;
struct Qdisc *q;
struct rw_semaphore cb_lock;
struct flow_block flow_block;
struct list_head owner_list;
bool keep_dst;
atomic_t offloadcnt;
unsigned int nooffloaddevcnt;
unsigned int lockeddevcnt;
struct {
struct tcf_chain *chain;
struct list_head filter_chain_list;
} chain0;
struct callback_head rcu;
struct hlist_head proto_destroy_ht[1 << (7)];
struct mutex proto_destroy_lock;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
return lock_is_held(&(&chain->filter_chain_lock)->dep_map);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
return lock_is_held(&(&tp->lock)->dep_map);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
struct qdisc_skb_cb *qcb;

do { __attribute__((__noreturn__)) extern void __compiletime_assert_546(void) ; if (!(!(sizeof(skb->cb) < sizeof(*qcb)))) __compiletime_assert_546(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_547(void) ; if (!(!(sizeof(qcb->data) < sz))) __compiletime_assert_547(); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int qdisc_qlen(const struct Qdisc *q)
{
return q->q.qlen;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int qdisc_qlen_sum(const struct Qdisc *q)
{
__u32 qlen = q->qstats.qlen;
int i;

if (qdisc_is_percpu_stats(q)) {
for (((i)) = -1; ((i)) = cpumask_next(((i)), (((const struct cpumask *)&__cpu_possible_mask))), ((i)) < nr_cpu_ids;)
qlen += ({ do { const void *__vpp_verify = (typeof((q->cpu_qstats) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((q->cpu_qstats))) *)((q->cpu_qstats))); (typeof((typeof(*((q->cpu_qstats))) *)((q->cpu_qstats)))) (__ptr + (((__per_cpu_offset[(i)])))); }); })->qlen;
} else {
qlen += q->q.qlen;
}

return qlen;
}
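/*
 * qdisc_qlen_sum() above shows the expanded per-cpu iteration; before
 * preprocessing it reads roughly:
 *
 *	for_each_possible_cpu(i)
 *		qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
 *
 * cpumask_next() starting from -1 walks cpu_possible_mask, and
 * __per_cpu_offset[i] relocates the pointer into each CPU's per-cpu area.
 */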

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
return (struct qdisc_skb_cb *)skb->cb;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
return &qdisc->q.lock;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
struct Qdisc *q = ({ typeof(*(qdisc->dev_queue->qdisc)) *__UNIQUE_ID_rcu548 = (typeof(*(qdisc->dev_queue->qdisc)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_549(void) ; if (!((sizeof((qdisc->dev_queue->qdisc)) == sizeof(char) || sizeof((qdisc->dev_queue->qdisc)) == sizeof(short) || sizeof((qdisc->dev_queue->qdisc)) == sizeof(int) || sizeof((qdisc->dev_queue->qdisc)) == sizeof(long)) || sizeof((qdisc->dev_queue->qdisc)) == sizeof(long long))) __compiletime_assert_549(); } while (0); (*(const volatile typeof( _Generic(((qdisc->dev_queue->qdisc)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((qdisc->dev_queue->qdisc)))) *)&((qdisc->dev_queue->qdisc))); }); do { } while (0 && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))); ; ((typeof(*(qdisc->dev_queue->qdisc)) *)(__UNIQUE_ID_rcu548)); });

return q;
}
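/*
 * qdisc_root() above is rcu_dereference_rtnl(qdisc->dev_queue->qdisc)
 * expanded: a READ_ONCE() of the pointer plus a lockdep check that either
 * the RTNL is held or we are inside an RCU read-side critical section.
 */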

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
{
return ({ typeof(*(qdisc->dev_queue->qdisc)) *__UNIQUE_ID_rcu550 = (typeof(*(qdisc->dev_queue->qdisc)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_551(void) ; if (!((sizeof((qdisc->dev_queue->qdisc)) == sizeof(char) || sizeof((qdisc->dev_queue->qdisc)) == sizeof(short) || sizeof((qdisc->dev_queue->qdisc)) == sizeof(int) || sizeof((qdisc->dev_queue->qdisc)) == sizeof(long)) || sizeof((qdisc->dev_queue->qdisc)) == sizeof(long long))) __compiletime_assert_551(); } while (0); (*(const volatile typeof( _Generic(((qdisc->dev_queue->qdisc)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((qdisc->dev_queue->qdisc)))) *)&((qdisc->dev_queue->qdisc))); }); do { } while (0 && (!((0) || rcu_read_lock_bh_held()))); ; ((typeof(*(qdisc->dev_queue->qdisc)) *)(__UNIQUE_ID_rcu550)); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->qdisc_sleeping;
}
# 579 "./include/net/sch_generic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
struct Qdisc *root = qdisc_root(qdisc);

({ static bool __attribute__((__section__(".data.once"))) __already_done; bool __ret_do_once = !!(!rtnl_is_locked()); if (__builtin_expect(!!(__ret_do_once && !__already_done), 0)) { __already_done = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("RTNL: assertion failed at %s (%d)\n", "include/net/sch_generic.h", 583); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/sch_generic.h"), "i" (583), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_do_once), 0); });
return qdisc_lock(root);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
struct Qdisc *root = qdisc_root_sleeping(qdisc);

({ static bool __attribute__((__section__(".data.once"))) __already_done; bool __ret_do_once = !!(!rtnl_is_locked()); if (__builtin_expect(!!(__ret_do_once && !__already_done), 0)) { __already_done = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("RTNL: assertion failed at %s (%d)\n", "include/net/sch_generic.h", 591); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/sch_generic.h"), "i" (591), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_do_once), 0); });
return qdisc_lock(root);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->dev;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sch_tree_lock(struct Qdisc *q)
{
if (q->flags & 8)
spin_lock_bh(qdisc_lock(q));
else
spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sch_tree_unlock(struct Qdisc *q)
{
if (q->flags & 8)
spin_unlock_bh(qdisc_lock(q));
else
spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
return ntx < dev->real_num_tx_queues ?
default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
u32 classid;
struct hlist_node hnode;
};

struct Qdisc_class_hash {
struct hlist_head *hash;
unsigned int hashsize;
unsigned int hashmask;
unsigned int hashelems;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int qdisc_class_hash(u32 id, u32 mask)
{
id ^= id >> 8;
id ^= id >> 4;
return id & mask;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
struct Qdisc_class_common *cl;
unsigned int h;

if (!id)
return ((void *)0);

h = qdisc_class_hash(id, hash->hashmask);
for (cl = ({ typeof((&hash->hash[h])->first) ____ptr = ((&hash->hash[h])->first); ____ptr ? ({ void *__mptr = (void *)(____ptr); _Static_assert(__builtin_types_compatible_p(typeof(*(____ptr)), typeof(((typeof(*(cl)) *)0)->hnode)) || __builtin_types_compatible_p(typeof(*(____ptr)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(cl)) *)(__mptr - __builtin_offsetof(typeof(*(cl)), hnode))); }) : ((void *)0); }); cl; cl = ({ typeof((cl)->hnode.next) ____ptr = ((cl)->hnode.next); ____ptr ? ({ void *__mptr = (void *)(____ptr); _Static_assert(__builtin_types_compatible_p(typeof(*(____ptr)), typeof(((typeof(*(cl)) *)0)->hnode)) || __builtin_types_compatible_p(typeof(*(____ptr)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(cl)) *)(__mptr - __builtin_offsetof(typeof(*(cl)), hnode))); }) : ((void *)0); })) {
if (cl->classid == id)
return cl;
}
return ((void *)0);
}
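/*
 * The for (...) loop in qdisc_class_find() above is
 * hlist_for_each_entry(cl, &hash->hash[h], hnode) with container_of()
 * expanded inline; the _Static_assert guards the pointer/member type match.
 */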

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
u32 hwtc = ((classid)&(0x0000FFFFU)) - 0xFFE0U;

return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -22;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_qdisc_change_real_num_tx(struct net_device *dev,
unsigned int new_real_tx);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
# 702 "./include/net/sch_generic.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
void *type_data)
{
q->flags &= ~0x200;
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
struct Qdisc *new, struct Qdisc *old,
enum tc_setup_type type, void *type_data,
struct netlink_ext_ack *extack)
{
}

struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops,
struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops, u32 parentid,
struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_at_tc_ingress(const struct sk_buff *skb)
{



return false;

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_skip_tc_classify(struct sk_buff *skb)
{






return false;
}


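/* Under RTNL, reset the qdisc of every TX queue numbered @i and above, taking each qdisc's lock around the reset. */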
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
struct Qdisc *qdisc;

for (; i < dev->num_tx_queues; i++) {
qdisc = ({ do { } while (0 && (!((lockdep_rtnl_is_held())))); ; ((typeof(*(netdev_get_tx_queue(dev, i)->qdisc)) *)((netdev_get_tx_queue(dev, i)->qdisc))); });
if (qdisc) {
spin_lock_bh(qdisc_lock(qdisc));
qdisc_reset(qdisc);
spin_unlock_bh(qdisc_lock(qdisc));
}
}
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool qdisc_all_tx_empty(const struct net_device *dev)
{
unsigned int i;

rcu_read_lock();
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
const struct Qdisc *q = ({ typeof(*(txq->qdisc)) *__UNIQUE_ID_rcu552 = (typeof(*(txq->qdisc)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_553(void) ; if (!((sizeof((txq->qdisc)) == sizeof(char) || sizeof((txq->qdisc)) == sizeof(short) || sizeof((txq->qdisc)) == sizeof(int) || sizeof((txq->qdisc)) == sizeof(long)) || sizeof((txq->qdisc)) == sizeof(long long))) __compiletime_assert_553(); } while (0); (*(const volatile typeof( _Generic(((txq->qdisc)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((txq->qdisc)))) *)&((txq->qdisc))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(txq->qdisc)) *)(__UNIQUE_ID_rcu552)); });

if (!qdisc_is_empty(q)) {
rcu_read_unlock();
return false;
}
}
rcu_read_unlock();
return true;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool qdisc_tx_changing(const struct net_device *dev)
{
unsigned int i;

for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
if (({ typeof(*(txq->qdisc)) *__UNIQUE_ID_rcu554 = (typeof(*(txq->qdisc)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_555(void) ; if (!((sizeof((txq->qdisc)) == sizeof(char) || sizeof((txq->qdisc)) == sizeof(short) || sizeof((txq->qdisc)) == sizeof(int) || sizeof((txq->qdisc)) == sizeof(long)) || sizeof((txq->qdisc)) == sizeof(long long))) __compiletime_assert_555(); } while (0); (*(const volatile typeof( _Generic(((txq->qdisc)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((txq->qdisc)))) *)&((txq->qdisc))); }); ; ((typeof(*(txq->qdisc)) *)(__UNIQUE_ID_rcu554)); }) != txq->qdisc_sleeping)
return true;
}
return false;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool qdisc_tx_is_noop(const struct net_device *dev)
{
unsigned int i;

for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
if (({ typeof(*(txq->qdisc)) *__UNIQUE_ID_rcu556 = (typeof(*(txq->qdisc)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_557(void) ; if (!((sizeof((txq->qdisc)) == sizeof(char) || sizeof((txq->qdisc)) == sizeof(short) || sizeof((txq->qdisc)) == sizeof(int) || sizeof((txq->qdisc)) == sizeof(long)) || sizeof((txq->qdisc)) == sizeof(long long))) __compiletime_assert_557(); } while (0); (*(const volatile typeof( _Generic(((txq->qdisc)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((txq->qdisc)))) *)&((txq->qdisc))); }); ; ((typeof(*(txq->qdisc)) *)(__UNIQUE_ID_rcu556)); }) != &noop_qdisc)
return false;
}
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
return qdisc_skb_cb(skb)->pkt_len;
}


enum net_xmit_qdisc_t {
__NET_XMIT_STOLEN = 0x00010000,
__NET_XMIT_BYPASS = 0x00020000,
};







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_calculate_pkt_len(struct sk_buff *skb,
const struct Qdisc *sch)
{






}

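/* Hand the skb to the qdisc's enqueue op; qdisc_calculate_pkt_len() expanded to an empty body in this configuration, so the length stamp is a no-op. */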
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
qdisc_calculate_pkt_len(skb, sch);
return sch->enqueue(skb, sch, to_free);
}

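/* Add bytes/packets to the basic counters inside a u64_stats sequence so 64-bit readers see a consistent pair. */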
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void _bstats_update(struct gnet_stats_basic_sync *bstats,
__u64 bytes, __u32 packets)
{
u64_stats_update_begin(&bstats->syncp);
u64_stats_add(&bstats->bytes, bytes);
u64_stats_add(&bstats->packets, packets);
u64_stats_update_end(&bstats->syncp);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bstats_update(struct gnet_stats_basic_sync *bstats,
const struct sk_buff *skb)
{
_bstats_update(bstats,
qdisc_pkt_len(skb),
skb_is_gso(skb) ? ((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs : 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_bstats_cpu_update(struct Qdisc *sch,
const struct sk_buff *skb)
{
bstats_update(({ do { const void *__vpp_verify = (typeof((sch->cpu_bstats) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(sch->cpu_bstats)) *)(sch->cpu_bstats)); (typeof((typeof(*(sch->cpu_bstats)) *)(sch->cpu_bstats))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }), skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_bstats_update(struct Qdisc *sch,
const struct sk_buff *skb)
{
bstats_update(&sch->bstats, skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_qstats_backlog_dec(struct Qdisc *sch,
const struct sk_buff *skb)
{
sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
const struct sk_buff *skb)
{
do { do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->backlog)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(sch->cpu_qstats->backlog)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->backlog)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog))); (typeof((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb)); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->backlog)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog))); (typeof((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb)); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->backlog)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog))); (typeof((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb)); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->backlog)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog))); (typeof((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb)); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_qstats_backlog_inc(struct Qdisc *sch,
const struct sk_buff *skb)
{
sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
const struct sk_buff *skb)
{
do { do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->backlog)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(sch->cpu_qstats->backlog)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->backlog)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog))); (typeof((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += qdisc_pkt_len(skb); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->backlog)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog))); (typeof((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += qdisc_pkt_len(skb); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->backlog)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog))); (typeof((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += qdisc_pkt_len(skb); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->backlog)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog))); (typeof((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += qdisc_pkt_len(skb); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
do { do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->qlen)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(sch->cpu_qstats->qlen)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->qlen)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->qlen))) *)(&(sch->cpu_qstats->qlen))); (typeof((typeof(*(&(sch->cpu_qstats->qlen))) *)(&(sch->cpu_qstats->qlen)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->qlen)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->qlen))) *)(&(sch->cpu_qstats->qlen))); (typeof((typeof(*(&(sch->cpu_qstats->qlen))) *)(&(sch->cpu_qstats->qlen)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->qlen)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->qlen))) *)(&(sch->cpu_qstats->qlen))); (typeof((typeof(*(&(sch->cpu_qstats->qlen))) *)(&(sch->cpu_qstats->qlen)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->qlen)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->qlen))) *)(&(sch->cpu_qstats->qlen))); (typeof((typeof(*(&(sch->cpu_qstats->qlen))) *)(&(sch->cpu_qstats->qlen)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
do { do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->qlen)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(sch->cpu_qstats->qlen)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->qlen)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->qlen))) *)(&(sch->cpu_qstats->qlen))); (typeof((typeof(*(&(sch->cpu_qstats->qlen))) *)(&(sch->cpu_qstats->qlen)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(sch->cpu_qstats->qlen))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->qlen)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->qlen))) *)(&(sch->cpu_qstats->qlen))); (typeof((typeof(*(&(sch->cpu_qstats->qlen))) *)(&(sch->cpu_qstats->qlen)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(sch->cpu_qstats->qlen))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->qlen)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->qlen))) *)(&(sch->cpu_qstats->qlen))); (typeof((typeof(*(&(sch->cpu_qstats->qlen))) *)(&(sch->cpu_qstats->qlen)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(sch->cpu_qstats->qlen))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->qlen)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->qlen))) *)(&(sch->cpu_qstats->qlen))); (typeof((typeof(*(&(sch->cpu_qstats->qlen))) *)(&(sch->cpu_qstats->qlen)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(sch->cpu_qstats->qlen))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
do { do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->requeues)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(sch->cpu_qstats->requeues)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->requeues)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->requeues))) *)(&(sch->cpu_qstats->requeues))); (typeof((typeof(*(&(sch->cpu_qstats->requeues))) *)(&(sch->cpu_qstats->requeues)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->requeues)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->requeues))) *)(&(sch->cpu_qstats->requeues))); (typeof((typeof(*(&(sch->cpu_qstats->requeues))) *)(&(sch->cpu_qstats->requeues)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->requeues)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->requeues))) *)(&(sch->cpu_qstats->requeues))); (typeof((typeof(*(&(sch->cpu_qstats->requeues))) *)(&(sch->cpu_qstats->requeues)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->requeues)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->requeues))) *)(&(sch->cpu_qstats->requeues))); (typeof((typeof(*(&(sch->cpu_qstats->requeues))) *)(&(sch->cpu_qstats->requeues)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
sch->qstats.drops += count;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
qstats->drops++;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
qstats->overlimits++;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_qstats_drop(struct Qdisc *sch)
{
qstats_drop_inc(&sch->qstats);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
do { do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->drops)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(sch->cpu_qstats->drops)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->drops)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->drops))) *)(&(sch->cpu_qstats->drops))); (typeof((typeof(*(&(sch->cpu_qstats->drops))) *)(&(sch->cpu_qstats->drops)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->drops)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->drops))) *)(&(sch->cpu_qstats->drops))); (typeof((typeof(*(&(sch->cpu_qstats->drops))) *)(&(sch->cpu_qstats->drops)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->drops)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->drops))) *)(&(sch->cpu_qstats->drops))); (typeof((typeof(*(&(sch->cpu_qstats->drops))) *)(&(sch->cpu_qstats->drops)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->drops)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->drops))) *)(&(sch->cpu_qstats->drops))); (typeof((typeof(*(&(sch->cpu_qstats->drops))) *)(&(sch->cpu_qstats->drops)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_qstats_overlimit(struct Qdisc *sch)
{
sch->qstats.overlimits++;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
{
__u32 qlen = qdisc_qlen_sum(sch);

return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
__u32 *backlog)
{
struct gnet_stats_queue qstats = { 0 };

gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
*qlen = qstats.qlen + qdisc_qlen(sch);
*backlog = qstats.backlog;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_tree_flush_backlog(struct Qdisc *sch)
{
__u32 qlen, backlog;

qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_purge_queue(struct Qdisc *sch)
{
__u32 qlen, backlog;

qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
qdisc_reset(sch);
qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
qh->head = ((void *)0);
qh->tail = ((void *)0);
qh->qlen = 0;
}

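/* Append the skb to the tail of the singly linked qdisc list and bump qlen. */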
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __qdisc_enqueue_tail(struct sk_buff *skb,
struct qdisc_skb_head *qh)
{
struct sk_buff *last = qh->tail;

if (last) {
skb->next = ((void *)0);
last->next = skb;
qh->tail = skb;
} else {
qh->tail = skb;
qh->head = skb;
}
qh->qlen++;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
__qdisc_enqueue_tail(skb, &sch->q);
qdisc_qstats_backlog_inc(sch, skb);
return 0x00;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __qdisc_enqueue_head(struct sk_buff *skb,
struct qdisc_skb_head *qh)
{
skb->next = qh->head;

if (!qh->head)
qh->tail = skb;
qh->head = skb;
qh->qlen++;
}

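/* Unlink and return the list head, clearing the tail pointer when the list empties; returns NULL if the list was already empty. */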
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
struct sk_buff *skb = qh->head;

if (__builtin_expect(!!(skb != ((void *)0)), 1)) {
qh->head = skb->next;
qh->qlen--;
if (qh->head == ((void *)0))
qh->tail = ((void *)0);
skb->next = ((void *)0);
}

return skb;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

if (__builtin_expect(!!(skb != ((void *)0)), 1)) {
qdisc_qstats_backlog_dec(sch, skb);
qdisc_bstats_update(sch, skb);
}

return skb;
}




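/* Chain the skb onto the caller's to_free list for deferred freeing. */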
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
skb->next = *to_free;
*to_free = skb;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __qdisc_drop_all(struct sk_buff *skb,
struct sk_buff **to_free)
{
if (skb->prev)
skb->prev->next = *to_free;
else
skb->next = *to_free;
*to_free = skb;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
struct qdisc_skb_head *qh,
struct sk_buff **to_free)
{
struct sk_buff *skb = __qdisc_dequeue_head(qh);

if (__builtin_expect(!!(skb != ((void *)0)), 1)) {
unsigned int len = qdisc_pkt_len(skb);

qdisc_qstats_backlog_dec(sch, skb);
__qdisc_drop(skb, to_free);
return len;
}

return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
const struct qdisc_skb_head *qh = &sch->q;

return qh->head;
}


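/* "Peek" by actually dequeuing: pull one skb from the qdisc and park it on gso_skb, still accounted as queued, so a later dequeue can return it. */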
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
struct sk_buff *skb = skb_peek(&sch->gso_skb);


if (!skb) {
skb = sch->dequeue(sch);

if (skb) {
__skb_queue_head(&sch->gso_skb, skb);

qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
}
}

return skb;
}

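/* Dequeue-side accounting: use the per-CPU counters when the qdisc keeps per-CPU stats, otherwise the shared ones. */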
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
struct sk_buff *skb)
{
if (qdisc_is_percpu_stats(sch)) {
qdisc_qstats_cpu_backlog_dec(sch, skb);
qdisc_bstats_cpu_update(sch, skb);
qdisc_qstats_cpu_qlen_dec(sch);
} else {
qdisc_qstats_backlog_dec(sch, skb);
qdisc_bstats_update(sch, skb);
sch->q.qlen--;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
unsigned int pkt_len)
{
if (qdisc_is_percpu_stats(sch)) {
qdisc_qstats_cpu_qlen_inc(sch);
do { do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->backlog)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(sch->cpu_qstats->backlog)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->backlog)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog))); (typeof((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += pkt_len; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->backlog)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog))); (typeof((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += pkt_len; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->backlog)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog))); (typeof((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += pkt_len; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->backlog)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog))); (typeof((typeof(*(&(sch->cpu_qstats->backlog))) *)(&(sch->cpu_qstats->backlog)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += pkt_len; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
} else {
sch->qstats.backlog += pkt_len;
sch->q.qlen++;
}
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
struct sk_buff *skb = skb_peek(&sch->gso_skb);

if (skb) {
skb = __skb_dequeue(&sch->gso_skb);
if (qdisc_is_percpu_stats(sch)) {
qdisc_qstats_cpu_backlog_dec(sch, skb);
qdisc_qstats_cpu_qlen_dec(sch);
} else {
qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
}
} else {
skb = sch->dequeue(sch);
}

return skb;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{




({ static bool __attribute__((__section__(".data.once"))) __already_done; bool __ret_do_once = !!(!rtnl_is_locked()); if (__builtin_expect(!!(__ret_do_once && !__already_done), 0)) { __already_done = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { do { } while(0); __warn_printk("RTNL: assertion failed at %s (%d)\n", "include/net/sch_generic.h", 1160); do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/net/sch_generic.h"), "i" (1160), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); do { } while(0); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_do_once), 0); });
if (qh->qlen) {
rtnl_kfree_skbs(qh->head, qh->tail);

qh->head = ((void *)0);
qh->tail = ((void *)0);
qh->qlen = 0;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void qdisc_reset_queue(struct Qdisc *sch)
{
__qdisc_reset_queue(&sch->q);
sch->qstats.backlog = 0;
}

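/* Swap *pold for the new qdisc under the tree lock, purging the old one's queue before handing it back. */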
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
struct Qdisc **pold)
{
struct Qdisc *old;

sch_tree_lock(sch);
old = *pold;
*pold = new;
if (old != ((void *)0))
qdisc_purge_queue(old);
sch_tree_unlock(sch);

return old;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
rtnl_kfree_skbs(skb, skb);
qdisc_qstats_drop(sch);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
__qdisc_drop(skb, to_free);
qdisc_qstats_cpu_drop(sch);

return 0x01;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
__qdisc_drop(skb, to_free);
qdisc_qstats_drop(sch);

return 0x01;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
__qdisc_drop_all(skb, to_free);
qdisc_qstats_drop(sch);

return 0x01;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
{
int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
if (slot < 0)
slot = 0;
slot >>= rtab->rate.cell_log;
if (slot > 255)
return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
return rtab->data[slot];
}

struct psched_ratecfg {
u64 rate_bytes_ps;
u32 mult;
u16 overhead;
u16 mpu;
u8 linklayer;
u8 shift;
};

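/* Length-to-nanoseconds using the precomputed mult/shift; ATM link layers round up to 48-byte cells billed at 53 bytes each. */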
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 psched_l2t_ns(const struct psched_ratecfg *r,
unsigned int len)
{
len += r->overhead;

if (len < r->mpu)
len = r->mpu;

if (__builtin_expect(!!(r->linklayer == TC_LINKLAYER_ATM), 0))
return ((u64)((((len) + (48) - 1) / (48))*53) * r->mult) >> r->shift;

return ((u64)len * r->mult) >> r->shift;
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
const struct tc_ratespec *conf,
u64 rate64);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void psched_ratecfg_getrate(struct tc_ratespec *res,
const struct psched_ratecfg *r)
{
memset(res, 0, sizeof(*res));





res->rate = __builtin_choose_expr(((!!(sizeof((typeof((u64)(r->rate_bytes_ps)) *)1 == (typeof((u64)(~0U)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((u64)(r->rate_bytes_ps)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((u64)(~0U)) * 0l)) : (int *)8))))), (((u64)(r->rate_bytes_ps)) < ((u64)(~0U)) ? ((u64)(r->rate_bytes_ps)) : ((u64)(~0U))), ({ typeof((u64)(r->rate_bytes_ps)) __UNIQUE_ID___x558 = ((u64)(r->rate_bytes_ps)); typeof((u64)(~0U)) __UNIQUE_ID___y559 = ((u64)(~0U)); ((__UNIQUE_ID___x558) < (__UNIQUE_ID___y559) ? (__UNIQUE_ID___x558) : (__UNIQUE_ID___y559)); }));

res->overhead = r->overhead;
res->mpu = r->mpu;
res->linklayer = (r->linklayer & 0x0F);
}

struct psched_pktrate {
u64 rate_pkts_ps;
u32 mult;
u8 shift;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 psched_pkt2t_ns(const struct psched_pktrate *r,
unsigned int pkt_num)
{
return ((u64)pkt_num * r->mult) >> r->shift;
}

void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);




struct mini_Qdisc {
struct tcf_proto *filter_list;
struct tcf_block *block;
struct gnet_stats_basic_sync *cpu_bstats;
struct gnet_stats_queue *cpu_qstats;
unsigned long rcu_state;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
const struct sk_buff *skb)
{
bstats_update(({ do { const void *__vpp_verify = (typeof((miniq->cpu_bstats) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(miniq->cpu_bstats)) *)(miniq->cpu_bstats)); (typeof((typeof(*(miniq->cpu_bstats)) *)(miniq->cpu_bstats))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }), skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
do { do { const void *__vpp_verify = (typeof((&(miniq->cpu_qstats->drops)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(miniq->cpu_qstats->drops)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(miniq->cpu_qstats->drops)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(miniq->cpu_qstats->drops))) *)(&(miniq->cpu_qstats->drops))); (typeof((typeof(*(&(miniq->cpu_qstats->drops))) *)(&(miniq->cpu_qstats->drops)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(miniq->cpu_qstats->drops)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(miniq->cpu_qstats->drops))) *)(&(miniq->cpu_qstats->drops))); (typeof((typeof(*(&(miniq->cpu_qstats->drops))) *)(&(miniq->cpu_qstats->drops)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(miniq->cpu_qstats->drops)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(miniq->cpu_qstats->drops))) *)(&(miniq->cpu_qstats->drops))); (typeof((typeof(*(&(miniq->cpu_qstats->drops))) *)(&(miniq->cpu_qstats->drops)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(miniq->cpu_qstats->drops)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(miniq->cpu_qstats->drops))) *)(&(miniq->cpu_qstats->drops))); (typeof((typeof(*(&(miniq->cpu_qstats->drops))) *)(&(miniq->cpu_qstats->drops)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}

struct mini_Qdisc_pair {
struct mini_Qdisc miniq1;
struct mini_Qdisc miniq2;
struct mini_Qdisc **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
struct mini_Qdisc **p_miniq);
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
struct tcf_block *block);

void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);

int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));
# 27 "./include/linux/filter.h" 2


# 1 "./include/uapi/linux/filter.h" 1
# 24 "./include/uapi/linux/filter.h"
struct sock_filter {
__u16 code;
__u8 jt;
__u8 jf;
__u32 k;
};

struct sock_fprog {
unsigned short len;
struct sock_filter *filter;
};
# 30 "./include/linux/filter.h" 2

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;
struct xdp_rxq_info;
struct xdp_buff;
struct sock_reuseport;
struct ctl_table;
struct ctl_table_header;
# 181 "./include/linux/filter.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool insn_is_zext(const struct bpf_insn *insn)
{
return insn->code == (0x04 | 0xb0 | 0x08) && insn->imm == 1;
}
# 537 "./include/linux/filter.h"
struct compat_sock_fprog {
u16 len;
compat_uptr_t filter;
};

struct sock_fprog_kern {
u16 len;
struct sock_filter *filter;
};




struct bpf_binary_header {
u32 size;
u8 image[] __attribute__((__aligned__(8)));
};

struct bpf_prog_stats {
u64_stats_t cnt;
u64_stats_t nsecs;
u64_stats_t misses;
struct u64_stats_sync syncp;
} __attribute__((__aligned__(2 * sizeof(u64))));

struct bpf_prog {
u16 pages;
u16 jited:1,
jit_requested:1,
gpl_compatible:1,
cb_access:1,
dst_needed:1,
blinding_requested:1,
blinded:1,
is_func:1,
kprobe_override:1,
has_callchain_buf:1,
enforce_expected_attach_type:1,
call_get_stack:1,
call_get_func_ip:1,
tstamp_type_access:1;
enum bpf_prog_type type;
enum bpf_attach_type expected_attach_type;
u32 len;
u32 jited_len;
u8 tag[8];
struct bpf_prog_stats *stats;
int *active;
unsigned int (*bpf_func)(const void *ctx,
const struct bpf_insn *insn);
struct bpf_prog_aux *aux;
struct sock_fprog_kern *orig_prog;

union {
struct { struct { } __empty_insns; struct sock_filter insns[]; };
struct { struct { } __empty_insnsi; struct bpf_insn insnsi[]; };
};
};

struct sk_filter {
refcount_t refcnt;
struct callback_head rcu;
struct bpf_prog *prog;
};

extern struct static_key_false bpf_stats_enabled_key;

typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
const struct bpf_insn *insnsi,
unsigned int (*bpf_func)(const void *,
const struct bpf_insn *));

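/* Run the program through the dispatcher; when the bpf_stats static key is enabled, also account one run and its sched_clock() runtime. */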
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) u32 __bpf_prog_run(const struct bpf_prog *prog,
const void *ctx,
bpf_dispatcher_fn dfunc)
{
u32 ret;

do { if (1) __cant_migrate("include/linux/filter.h", 615); } while (0);
if (__builtin_expect(!!(({ if (!__builtin_types_compatible_p(typeof(*&(&bpf_stats_enabled_key)->key), struct static_key) && !__builtin_types_compatible_p(typeof(*&(&bpf_stats_enabled_key)->key), struct static_key_true) && !__builtin_types_compatible_p(typeof(*&(&bpf_stats_enabled_key)->key), struct static_key_false)) ____wrong_branch_error(); static_key_count((struct static_key *)&(&bpf_stats_enabled_key)->key) > 0; })), 0)) {
struct bpf_prog_stats *stats;
u64 start = sched_clock();
unsigned long flags;

ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
stats = ({ do { const void *__vpp_verify = (typeof((prog->stats) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(prog->stats)) *)(prog->stats)); (typeof((typeof(*(prog->stats)) *)(prog->stats))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); });
flags = u64_stats_update_begin_irqsave(&stats->syncp);
u64_stats_inc(&stats->cnt);
u64_stats_add(&stats->nsecs, sched_clock() - start);
u64_stats_update_end_irqrestore(&stats->syncp, flags);
} else {
ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
}
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
{
return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
}
# 646 "./include/linux/filter.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
const void *ctx)
{
u32 ret;

migrate_disable();
ret = bpf_prog_run(prog, ctx);
migrate_enable();
return ret;
}



struct bpf_skb_data_end {
struct qdisc_skb_cb qdisc_cb;
void *data_meta;
void *data_end;
};

struct bpf_nh_params {
u32 nh_family;
union {
u32 ipv4_nh;
struct in6_addr ipv6_nh;
};
};

struct bpf_redirect_info {
u32 flags;
u32 tgt_index;
void *tgt_value;
struct bpf_map *map;
u32 map_id;
enum bpf_map_type map_type;
u32 kern_flags;
struct bpf_nh_params nh;
};

extern __attribute__((section(".data..percpu" ""))) __typeof__(struct bpf_redirect_info) bpf_redirect_info;
# 695 "./include/linux/filter.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpf_compute_data_pointers(struct sk_buff *skb)
{
struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

do { __attribute__((__noreturn__)) extern void __compiletime_assert_560(void) ; if (!(!(sizeof(*cb) > sizeof((((struct sk_buff *)0)->cb))))) __compiletime_assert_560(); } while (0);
cb->data_meta = skb->data - skb_metadata_len(skb);
cb->data_end = skb->data + skb_headlen(skb);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpf_compute_and_save_data_end(
struct sk_buff *skb, void **saved_data_end)
{
struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

*saved_data_end = cb->data_end;
cb->data_end = skb->data + skb_headlen(skb);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpf_restore_data_end(
struct sk_buff *skb, void *saved_data_end)
{
struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

cb->data_end = saved_data_end;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u8 *bpf_skb_cb(const struct sk_buff *skb)
{
# 737 "./include/linux/filter.h"
do { __attribute__((__noreturn__)) extern void __compiletime_assert_561(void) ; if (!(!(sizeof((((struct __sk_buff *)0)->cb)) != 20))) __compiletime_assert_561(); } while (0);
do { __attribute__((__noreturn__)) extern void __compiletime_assert_562(void) ; if (!(!(sizeof((((struct __sk_buff *)0)->cb)) != sizeof((((struct qdisc_skb_cb *)0)->data))))) __compiletime_assert_562(); } while (0);


return qdisc_skb_cb(skb)->data;
}
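/*
 * Note: the two do { ... __compiletime_assert_56x(); } while (0) blocks
 * above are BUILD_BUG_ON() expansions. They pin down the layout this
 * helper relies on: the BPF-visible __sk_buff->cb scratch area is
 * exactly 20 bytes and the same size as qdisc_skb_cb->data, so
 * bpf_skb_cb() can hand out qdisc_skb_cb(skb)->data as the program's
 * cb[] storage.
 */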


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
const void *ctx)
{
const struct sk_buff *skb = ctx;
u8 *cb_data = bpf_skb_cb(skb);
u8 cb_saved[20];
u32 res;

if (__builtin_expect(!!(prog->cb_access), 0)) {
memcpy(cb_saved, cb_data, sizeof(cb_saved));
memset(cb_data, 0, sizeof(cb_saved));
}

res = bpf_prog_run(prog, skb);

if (__builtin_expect(!!(prog->cb_access), 0))
memcpy(cb_data, cb_saved, sizeof(cb_saved));

return res;
}
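/*
 * Note: prog->cb_access is set when the program actually reads or
 * writes cb[]. Only in that case does this wrapper save the 20-byte
 * area, zero it for the run (so the program never sees stale qdisc
 * state), and restore it afterwards; programs that never touch cb[]
 * skip both memcpy() calls.
 */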

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
struct sk_buff *skb)
{
u32 res;

migrate_disable();
res = __bpf_prog_run_save_cb(prog, skb);
migrate_enable();
return res;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
struct sk_buff *skb)
{
u8 *cb_data = bpf_skb_cb(skb);
u32 res;

if (__builtin_expect(!!(prog->cb_access), 0))
memset(cb_data, 0, 20);

res = bpf_prog_run_pin_on_cpu(prog, skb);
return res;
}



extern struct static_key_false bpf_master_redirect_enabled_key;

u32 xdp_master_redirect(struct xdp_buff *xdp);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
struct xdp_buff *xdp)
{




u32 act = __bpf_prog_run(prog, xdp, bpf_dispatcher_nop_func);

if (__builtin_expect(!!(({ if (!__builtin_types_compatible_p(typeof(*&(&bpf_master_redirect_enabled_key)->key), struct static_key) && !__builtin_types_compatible_p(typeof(*&(&bpf_master_redirect_enabled_key)->key), struct static_key_true) && !__builtin_types_compatible_p(typeof(*&(&bpf_master_redirect_enabled_key)->key), struct static_key_false)) ____wrong_branch_error(); static_key_count((struct static_key *)&(&bpf_master_redirect_enabled_key)->key) > 0; })), 0)) {
if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev))
act = xdp_master_redirect(xdp);
}

return act;
}
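/*
 * Note: the second static-key test (bpf_master_redirect_enabled_key) is
 * enabled by the bonding driver while an XDP program is attached to a
 * bond. In that case an XDP_TX verdict on a bond slave is rewritten
 * through xdp_master_redirect() so the bond master can pick the actual
 * transmit slave.
 */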

void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
return prog->len * sizeof(struct bpf_insn);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
{
return ((((bpf_prog_insn_size(prog) + sizeof(__be64) + 1)-1) | ((__typeof__(bpf_prog_insn_size(prog) + sizeof(__be64) + 1))((64)-1)))+1);

}
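/*
 * Note: the return expression above is
 * round_up(bpf_prog_insn_size(prog) + sizeof(__be64) + 1, 64) with the
 * macro expanded. For any power-of-two alignment, ((x - 1) | (align - 1)) + 1
 * rounds x up to the next multiple. A minimal illustrative helper
 * (bpf_doc_round_up_pow2 is a name invented for this annotation, not a
 * kernel symbol):
 */
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) unsigned long bpf_doc_round_up_pow2(unsigned long x, unsigned long align)
{
	/* only valid when align is a power of two, e.g. (130, 64) -> 192 */
	return ((x - 1) | (align - 1)) + 1;
}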

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int bpf_prog_size(unsigned int proglen)
{
return __builtin_choose_expr(((!!(sizeof((typeof(sizeof(struct bpf_prog)) *)1 == (typeof(__builtin_offsetof(struct bpf_prog, insns[proglen])) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(sizeof(struct bpf_prog)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(__builtin_offsetof(struct bpf_prog, insns[proglen])) * 0l)) : (int *)8))))), ((sizeof(struct bpf_prog)) > (__builtin_offsetof(struct bpf_prog, insns[proglen])) ? (sizeof(struct bpf_prog)) : (__builtin_offsetof(struct bpf_prog, insns[proglen]))), ({ typeof(sizeof(struct bpf_prog)) __UNIQUE_ID___x563 = (sizeof(struct bpf_prog)); typeof(__builtin_offsetof(struct bpf_prog, insns[proglen])) __UNIQUE_ID___y564 = (__builtin_offsetof(struct bpf_prog, insns[proglen])); ((__UNIQUE_ID___x563) > (__UNIQUE_ID___y564) ? (__UNIQUE_ID___x563) : (__UNIQUE_ID___y564)); }));

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_prog_was_classic(const struct bpf_prog *prog)
{





return prog->type == BPF_PROG_TYPE_UNSPEC;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 bpf_ctx_off_adjust_machine(u32 size)
{
const u32 size_machine = sizeof(unsigned long);

if (size > size_machine && size % size_machine == 0)
size = size_machine;

return size;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
{
	return size <= size_default && (size & (size - 1)) == 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u8
bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
{
u8 access_off = off & (size_default - 1);


return access_off;



}
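/*
 * Note: these two helpers govern narrowed context loads. An access is
 * allowed when it does not exceed the field's default width and its
 * size is a power of two ((size & (size - 1)) == 0). Only the
 * little-endian branch of the offset helper survives preprocessing for
 * riscv64: the byte offset within the full-width field is
 * off & (size_default - 1).
 */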
# 878 "./include/linux/filter.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpf_prog_lock_ro(struct bpf_prog *fp)
{

if (!fp->jited) {
set_vm_flush_reset_perms(fp);
set_memory_ro((unsigned long)fp, fp->pages);
}

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
set_vm_flush_reset_perms(hdr);
set_memory_ro((unsigned long)hdr, hdr->size >> (12));
set_memory_x((unsigned long)hdr, hdr->size >> (12));
}

int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int sk_filter(struct sock *sk, struct sk_buff *skb)
{
return sk_filter_trim_cap(sk, skb, 1);
}

struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);

bool bpf_opcode_in_insntable(u8 code);

void bpf_prog_free_linfo(struct bpf_prog *prog);
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
const u32 *insn_to_jit_off);
int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
void bpf_prog_jit_attempt_done(struct bpf_prog *prog);

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpf_prog_unlock_free(struct bpf_prog *fp)
{
__bpf_prog_free(fp);
}

typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
unsigned int flen);

int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
void sk_reuseport_prog_free(struct bpf_prog *prog);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, struct sock_filter *filter,
unsigned int len);

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);




struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_jit_needs_zext(void);
bool bpf_jit_supports_kfunc_call(void);
bool bpf_helper_changes_pkt_data(void *func);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_dump_raw_ok(const struct cred *cred)
{



return kallsyms_show_value(cred);
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);

void bpf_clear_redirect_map(struct bpf_map *map);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xdp_return_frame_no_direct(void)
{
struct bpf_redirect_info *ri = ({ do { const void *__vpp_verify = (typeof((&bpf_redirect_info) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&bpf_redirect_info)) *)(&bpf_redirect_info)); (typeof((typeof(*(&bpf_redirect_info)) *)(&bpf_redirect_info))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); });

return ri->kern_flags & ((((1UL))) << (0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xdp_set_return_frame_no_direct(void)
{
struct bpf_redirect_info *ri = ({ do { const void *__vpp_verify = (typeof((&bpf_redirect_info) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&bpf_redirect_info)) *)(&bpf_redirect_info)); (typeof((typeof(*(&bpf_redirect_info)) *)(&bpf_redirect_info))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); });

ri->kern_flags |= ((((1UL))) << (0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xdp_clear_return_frame_no_direct(void)
{
struct bpf_redirect_info *ri = ({ do { const void *__vpp_verify = (typeof((&bpf_redirect_info) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&bpf_redirect_info)) *)(&bpf_redirect_info)); (typeof((typeof(*(&bpf_redirect_info)) *)(&bpf_redirect_info))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); });

ri->kern_flags &= ~((((1UL))) << (0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xdp_ok_fwd_dev(const struct net_device *fwd,
								 unsigned int pktlen)
{
	unsigned int len;

	if (__builtin_expect(!!(!(fwd->flags & IFF_UP)), 0))
		return -100;

	len = fwd->mtu + fwd->hard_header_len + 4;
	if (pktlen > len)
		return -90;

	return 0;
}
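/*
 * Note: the raw constants are pre-expanded errnos: -100 is -ENETDOWN
 * and -90 is -EMSGSIZE. The literal 4 added to mtu + hard_header_len
 * corresponds to VLAN_HLEN in the unpreprocessed source, i.e. the
 * forwarded frame is allowed one VLAN tag beyond the device MTU.
 */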







int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
struct xdp_buff *xdp, struct bpf_prog *prog);
int xdp_do_redirect(struct net_device *dev,
struct xdp_buff *xdp,
struct bpf_prog *prog);
int xdp_do_redirect_frame(struct net_device *dev,
struct xdp_buff *xdp,
struct xdp_frame *xdpf,
struct bpf_prog *prog);
void xdp_do_flush(void);







void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act);


struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
struct bpf_prog *prog, struct sk_buff *skb,
struct sock *migrating_sk,
u32 hash);
# 1171 "./include/linux/filter.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ebpf_jit_enabled(void)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
struct bpf_jit_poke_descriptor *poke)
{
return -524;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpf_jit_free(struct bpf_prog *fp)
{
bpf_prog_unlock_free(fp);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_jit_kallsyms_enabled(void)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *
__bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char *sym)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_bpf_text_address(unsigned long addr)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
char *type, char *sym)
{
return -34;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
}



void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_needs_clear_a(const struct sock_filter *first)
{
switch (first->code) {
case 0x06 | 0x00:
case 0x00 | 0x00 | 0x80:
return false;

case 0x00 | 0x00 | 0x20:
case 0x00 | 0x08 | 0x20:
case 0x00 | 0x10 | 0x20:
if (first->k == (-0x1000) + 40)
return true;
return false;

default:
return true;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u16 bpf_anc_helper(const struct sock_filter *ftest)
{
do { if (__builtin_expect(!!(ftest->code & ((((1UL))) << (15))), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/filter.h"), "i" (1263), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);

switch (ftest->code) {
case 0x00 | 0x00 | 0x20:
case 0x00 | 0x08 | 0x20:
case 0x00 | 0x10 | 0x20:


switch (ftest->k) {
case (-0x1000) + 0: return ((((1UL))) << (15)) | 0;
case (-0x1000) + 4: return ((((1UL))) << (15)) | 4;
case (-0x1000) + 8: return ((((1UL))) << (15)) | 8;
case (-0x1000) + 12: return ((((1UL))) << (15)) | 12;
case (-0x1000) + 16: return ((((1UL))) << (15)) | 16;
case (-0x1000) + 20: return ((((1UL))) << (15)) | 20;
case (-0x1000) + 24: return ((((1UL))) << (15)) | 24;
case (-0x1000) + 28: return ((((1UL))) << (15)) | 28;
case (-0x1000) + 32: return ((((1UL))) << (15)) | 32;
case (-0x1000) + 36: return ((((1UL))) << (15)) | 36;
case (-0x1000) + 40: return ((((1UL))) << (15)) | 40;
case (-0x1000) + 44: return ((((1UL))) << (15)) | 44;
case (-0x1000) + 48: return ((((1UL))) << (15)) | 48;
case (-0x1000) + 52: return ((((1UL))) << (15)) | 52;
case (-0x1000) + 56: return ((((1UL))) << (15)) | 56;
case (-0x1000) + 60: return ((((1UL))) << (15)) | 60;
}
__attribute__((__fallthrough__));
default:
return ftest->code;
}
}
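/*
 * Note: this is the classic-BPF ancillary-load mapper. (-0x1000) is
 * SKF_AD_OFF and (1UL << 15) is the BPF_ANC marker bit; the big switch
 * rewrites absolute loads at SKF_AD_OFF + k into BPF_ANC | k codes.
 * The do { ... } block at the top is BUG_ON(ftest->code & BPF_ANC)
 * expanded into the riscv "ebreak" bug-table sequence, which is why an
 * __asm__ statement appears in the middle of this header.
 */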

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
int k, unsigned int size);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int bpf_tell_extensions(void)
{
return 64;
}

struct bpf_sock_addr_kern {
struct sock *sk;
struct sockaddr *uaddr;




u64 tmp_reg;
void *t_ctx;
};

struct bpf_sock_ops_kern {
struct sock *sk;
union {
u32 args[4];
u32 reply;
u32 replylong[4];
};
struct sk_buff *syn_skb;
struct sk_buff *skb;
void *skb_data_end;
u8 op;
u8 is_fullsock;
u8 remaining_opt_len;
u64 temp;
# 1336 "./include/linux/filter.h"
};

struct bpf_sysctl_kern {
struct ctl_table_header *head;
struct ctl_table *table;
void *cur_val;
size_t cur_len;
void *new_val;
size_t new_len;
int new_updated;
int write;
loff_t *ppos;

u64 tmp_reg;
};


struct bpf_sockopt_buf {
u8 data[32];
};

struct bpf_sockopt_kern {
struct sock *sk;
u8 *optval;
u8 *optval_end;
s32 level;
s32 optname;
s32 optlen;

struct task_struct *current_task;

u64 tmp_reg;
};

int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);

struct bpf_sk_lookup_kern {
u16 family;
u16 protocol;
__be16 sport;
u16 dport;
struct {
__be32 saddr;
__be32 daddr;
} v4;
struct {
const struct in6_addr *saddr;
const struct in6_addr *daddr;
} v6;
struct sock *selected_sk;
u32 ingress_ifindex;
bool no_reuseport;
};

extern struct static_key_false bpf_sk_lookup_enabled;
# 1446 "./include/linux/filter.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const u16 dport,
const int ifindex, struct sock **psk)
{
struct bpf_prog_array *run_array;
struct sock *selected_sk = ((void *)0);
bool no_reuseport = false;

rcu_read_lock();
run_array = ({ typeof(*(net->bpf.run_array[NETNS_BPF_SK_LOOKUP])) *__UNIQUE_ID_rcu565 = (typeof(*(net->bpf.run_array[NETNS_BPF_SK_LOOKUP])) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_566(void) ; if (!((sizeof((net->bpf.run_array[NETNS_BPF_SK_LOOKUP])) == sizeof(char) || sizeof((net->bpf.run_array[NETNS_BPF_SK_LOOKUP])) == sizeof(short) || sizeof((net->bpf.run_array[NETNS_BPF_SK_LOOKUP])) == sizeof(int) || sizeof((net->bpf.run_array[NETNS_BPF_SK_LOOKUP])) == sizeof(long)) || sizeof((net->bpf.run_array[NETNS_BPF_SK_LOOKUP])) == sizeof(long long))) __compiletime_assert_566(); } while (0); (*(const volatile typeof( _Generic(((net->bpf.run_array[NETNS_BPF_SK_LOOKUP])), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((net->bpf.run_array[NETNS_BPF_SK_LOOKUP])))) *)&((net->bpf.run_array[NETNS_BPF_SK_LOOKUP]))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(net->bpf.run_array[NETNS_BPF_SK_LOOKUP])) *)(__UNIQUE_ID_rcu565)); });
if (run_array) {
struct bpf_sk_lookup_kern ctx = {
.family = 2,
.protocol = protocol,
.v4.saddr = saddr,
.v4.daddr = daddr,
.sport = sport,
.dport = dport,
.ingress_ifindex = ifindex,
};
u32 act;

act = ({ struct bpf_sk_lookup_kern *_ctx = &(ctx); struct bpf_prog_array_item *_item; struct sock *_selected_sk = ((void *)0); bool _no_reuseport = false; struct bpf_prog *_prog; bool _all_pass = true; u32 _ret; migrate_disable(); _item = &(run_array)->items[0]; while ((_prog = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_567(void) ; if (!((sizeof(_item->prog) == sizeof(char) || sizeof(_item->prog) == sizeof(short) || sizeof(_item->prog) == sizeof(int) || sizeof(_item->prog) == sizeof(long)) || sizeof(_item->prog) == sizeof(long long))) __compiletime_assert_567(); } while (0); (*(const volatile typeof( _Generic((_item->prog), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (_item->prog))) *)&(_item->prog)); }))) { _ctx->selected_sk = _selected_sk; _ctx->no_reuseport = _no_reuseport; _ret = bpf_prog_run(_prog, _ctx); if (_ret == SK_PASS && _ctx->selected_sk) { _selected_sk = _ctx->selected_sk; _no_reuseport = _ctx->no_reuseport; } else if (_ret == SK_DROP && _all_pass) { _all_pass = false; } _item++; } _ctx->selected_sk = _selected_sk; _ctx->no_reuseport = _no_reuseport; migrate_enable(); _all_pass || _selected_sk ? SK_PASS : SK_DROP; });
if (act == SK_PASS) {
selected_sk = ctx.selected_sk;
no_reuseport = ctx.no_reuseport;
} else {
selected_sk = ERR_PTR(-111);
}
}
rcu_read_unlock();
*psk = selected_sk;
return no_reuseport;
}
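/*
 * Note: the statement expression assigned to `act` is the sk_lookup
 * run-array macro from the unpreprocessed header, fully expanded: under
 * migrate_disable() it walks run_array->items[], runs each program with
 * bpf_prog_run(), remembers the last SK_PASS verdict that selected a
 * socket, and yields SK_DROP only if some program dropped and none
 * selected. On a drop the caller stores ERR_PTR(-111), i.e.
 * -ECONNREFUSED, in *psk. The IPv6 twin below expands identically.
 */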


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
const struct in6_addr *saddr,
const __be16 sport,
const struct in6_addr *daddr,
const u16 dport,
const int ifindex, struct sock **psk)
{
struct bpf_prog_array *run_array;
struct sock *selected_sk = ((void *)0);
bool no_reuseport = false;

rcu_read_lock();
run_array = ({ typeof(*(net->bpf.run_array[NETNS_BPF_SK_LOOKUP])) *__UNIQUE_ID_rcu568 = (typeof(*(net->bpf.run_array[NETNS_BPF_SK_LOOKUP])) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_569(void) ; if (!((sizeof((net->bpf.run_array[NETNS_BPF_SK_LOOKUP])) == sizeof(char) || sizeof((net->bpf.run_array[NETNS_BPF_SK_LOOKUP])) == sizeof(short) || sizeof((net->bpf.run_array[NETNS_BPF_SK_LOOKUP])) == sizeof(int) || sizeof((net->bpf.run_array[NETNS_BPF_SK_LOOKUP])) == sizeof(long)) || sizeof((net->bpf.run_array[NETNS_BPF_SK_LOOKUP])) == sizeof(long long))) __compiletime_assert_569(); } while (0); (*(const volatile typeof( _Generic(((net->bpf.run_array[NETNS_BPF_SK_LOOKUP])), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((net->bpf.run_array[NETNS_BPF_SK_LOOKUP])))) *)&((net->bpf.run_array[NETNS_BPF_SK_LOOKUP]))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(net->bpf.run_array[NETNS_BPF_SK_LOOKUP])) *)(__UNIQUE_ID_rcu568)); });
if (run_array) {
struct bpf_sk_lookup_kern ctx = {
.family = 10,
.protocol = protocol,
.v6.saddr = saddr,
.v6.daddr = daddr,
.sport = sport,
.dport = dport,
.ingress_ifindex = ifindex,
};
u32 act;

act = ({ struct bpf_sk_lookup_kern *_ctx = &(ctx); struct bpf_prog_array_item *_item; struct sock *_selected_sk = ((void *)0); bool _no_reuseport = false; struct bpf_prog *_prog; bool _all_pass = true; u32 _ret; migrate_disable(); _item = &(run_array)->items[0]; while ((_prog = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_570(void) ; if (!((sizeof(_item->prog) == sizeof(char) || sizeof(_item->prog) == sizeof(short) || sizeof(_item->prog) == sizeof(int) || sizeof(_item->prog) == sizeof(long)) || sizeof(_item->prog) == sizeof(long long))) __compiletime_assert_570(); } while (0); (*(const volatile typeof( _Generic((_item->prog), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (_item->prog))) *)&(_item->prog)); }))) { _ctx->selected_sk = _selected_sk; _ctx->no_reuseport = _no_reuseport; _ret = bpf_prog_run(_prog, _ctx); if (_ret == SK_PASS && _ctx->selected_sk) { _selected_sk = _ctx->selected_sk; _no_reuseport = _ctx->no_reuseport; } else if (_ret == SK_DROP && _all_pass) { _all_pass = false; } _item++; } _ctx->selected_sk = _selected_sk; _ctx->no_reuseport = _no_reuseport; migrate_enable(); _all_pass || _selected_sk ? SK_PASS : SK_DROP; });
if (act == SK_PASS) {
selected_sk = ctx.selected_sk;
no_reuseport = ctx.no_reuseport;
} else {
selected_sk = ERR_PTR(-111);
}
}
rcu_read_unlock();
*psk = selected_sk;
return no_reuseport;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifindex,
u64 flags, const u64 flag_mask,
void *lookup_elem(struct bpf_map *map, u32 key))
{
struct bpf_redirect_info *ri = ({ do { const void *__vpp_verify = (typeof((&bpf_redirect_info) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&bpf_redirect_info)) *)(&bpf_redirect_info)); (typeof((typeof(*(&bpf_redirect_info)) *)(&bpf_redirect_info))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); });
const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;


if (__builtin_expect(!!(flags & ~(action_mask | flag_mask)), 0))
return XDP_ABORTED;

ri->tgt_value = lookup_elem(map, ifindex);
if (__builtin_expect(!!(!ri->tgt_value), 0) && !(flags & BPF_F_BROADCAST)) {





ri->map_id = ((int)(~0U >> 1));
ri->map_type = BPF_MAP_TYPE_UNSPEC;
return flags & action_mask;
}

ri->tgt_index = ifindex;
ri->map_id = map->id;
ri->map_type = map->map_type;

if (flags & BPF_F_BROADCAST) {
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_571(void) ; if (!((sizeof(ri->map) == sizeof(char) || sizeof(ri->map) == sizeof(short) || sizeof(ri->map) == sizeof(int) || sizeof(ri->map) == sizeof(long)) || sizeof(ri->map) == sizeof(long long))) __compiletime_assert_571(); } while (0); do { *(volatile typeof(ri->map) *)&(ri->map) = (map); } while (0); } while (0);
ri->flags = flags;
} else {
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_572(void) ; if (!((sizeof(ri->map) == sizeof(char) || sizeof(ri->map) == sizeof(short) || sizeof(ri->map) == sizeof(int) || sizeof(ri->map) == sizeof(long)) || sizeof(ri->map) == sizeof(long long))) __compiletime_assert_572(); } while (0); do { *(volatile typeof(ri->map) *)&(ri->map) = (((void *)0)); } while (0); } while (0);
ri->flags = 0;
}

return XDP_REDIRECT;
}
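/*
 * Note: __bpf_xdp_redirect_map() records the redirect target in the
 * per-CPU bpf_redirect_info and returns XDP_REDIRECT on success. Flag
 * bits outside action_mask | flag_mask abort the redirect; on a failed
 * lookup without BPF_F_BROADCAST it returns flags & action_mask, the
 * fallback action the program encoded in the low flag bits, after
 * resetting map_id to INT_MAX (the ((int)(~0U >> 1)) expansion) and the
 * map type.
 */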
# 6 "./include/net/sock_reuseport.h" 2





extern spinlock_t reuseport_lock;

struct sock_reuseport {
struct callback_head rcu;

u16 max_socks;
u16 num_socks;
u16 num_closed_socks;



unsigned int synq_overflow_ts;

unsigned int reuseport_id;
unsigned int bind_inany:1;
unsigned int has_conns:1;
struct bpf_prog *prog;
struct sock *socks[];
};

extern int reuseport_alloc(struct sock *sk, bool bind_inany);
extern int reuseport_add_sock(struct sock *sk, struct sock *sk2,
bool bind_inany);
extern void reuseport_detach_sock(struct sock *sk);
void reuseport_stop_listen_sock(struct sock *sk);
extern struct sock *reuseport_select_sock(struct sock *sk,
u32 hash,
struct sk_buff *skb,
int hdr_len);
struct sock *reuseport_migrate_sock(struct sock *sk,
struct sock *migrating_sk,
struct sk_buff *skb);
extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
extern int reuseport_detach_prog(struct sock *sk);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool reuseport_has_conns(struct sock *sk, bool set)
{
struct sock_reuseport *reuse;
bool ret = false;

rcu_read_lock();
reuse = ({ typeof(*(sk->sk_reuseport_cb)) *__UNIQUE_ID_rcu573 = (typeof(*(sk->sk_reuseport_cb)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_574(void) ; if (!((sizeof((sk->sk_reuseport_cb)) == sizeof(char) || sizeof((sk->sk_reuseport_cb)) == sizeof(short) || sizeof((sk->sk_reuseport_cb)) == sizeof(int) || sizeof((sk->sk_reuseport_cb)) == sizeof(long)) || sizeof((sk->sk_reuseport_cb)) == sizeof(long long))) __compiletime_assert_574(); } while (0); (*(const volatile typeof( _Generic(((sk->sk_reuseport_cb)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((sk->sk_reuseport_cb)))) *)&((sk->sk_reuseport_cb))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(sk->sk_reuseport_cb)) *)(__UNIQUE_ID_rcu573)); });
if (reuse) {
if (set)
reuse->has_conns = 1;
ret = reuse->has_conns;
}
rcu_read_unlock();

return ret;
}
# 36 "./include/net/tcp.h" 2


# 1 "./include/net/ip.h" 1
# 27 "./include/net/ip.h"
# 1 "./include/linux/static_key.h" 1
# 28 "./include/net/ip.h" 2
# 40 "./include/net/ip.h"
extern unsigned int sysctl_fib_sync_mem;
extern unsigned int sysctl_fib_sync_mem_min;
extern unsigned int sysctl_fib_sync_mem_max;

struct sock;

struct inet_skb_parm {
int iif;
struct ip_options opt;
u16 flags;
# 60 "./include/net/ip.h"
u16 frag_max_size;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ipv4_l3mdev_skb(u16 flags)
{
return !!(flags & ((((1UL))) << (7)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int ip_hdrlen(const struct sk_buff *skb)
{
return ip_hdr(skb)->ihl * 4;
}

struct ipcm_cookie {
struct sockcm_cookie sockc;
__be32 addr;
int oif;
struct ip_options_rcu *opt;
__u8 ttl;
__s16 tos;
char priority;
__u16 gso_size;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ipcm_init(struct ipcm_cookie *ipcm)
{
*ipcm = (struct ipcm_cookie) { .tos = -1 };
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ipcm_init_sk(struct ipcm_cookie *ipcm,
const struct inet_sock *inet)
{
ipcm_init(ipcm);

ipcm->sockc.mark = inet->sk.sk_mark;
ipcm->sockc.tsflags = inet->sk.sk_tsflags;
ipcm->oif = inet->sk.__sk_common.skc_bound_dev_if;
ipcm->addr = inet->inet_saddr;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int inet_sdif(const struct sk_buff *skb)
{




return 0;
}
# 124 "./include/net/ip.h"
struct ip_ra_chain {
struct ip_ra_chain *next;
struct sock *sk;
union {
void (*destructor)(struct sock *);
struct sock *saved_sk;
};
struct callback_head rcu;
};
# 142 "./include/net/ip.h"
struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;

int igmp_mc_init(void);





int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
__be32 saddr, __be32 daddr,
struct ip_options_rcu *opt, u8 tos);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *));

struct ip_fraglist_iter {
struct sk_buff *frag;
struct iphdr *iph;
int offset;
unsigned int hlen;
};

void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
unsigned int hlen, struct ip_fraglist_iter *iter);
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
{
struct sk_buff *skb = iter->frag;

iter->frag = skb->next;
skb_mark_not_on_list(skb);

return skb;
}

struct ip_frag_state {
bool DF;
unsigned int hlen;
unsigned int ll_rs;
unsigned int mtu;
unsigned int left;
int offset;
int ptr;
__be16 not_last_frag;
};

void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
unsigned int mtu, bool DF, struct ip_frag_state *state);
struct sk_buff *ip_frag_next(struct sk_buff *skb,
struct ip_frag_state *state);

void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
__u8 tos);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int len, int protolen,
struct ipcm_cookie *ipc,
struct rtable **rt,
unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
struct sk_buff *skb);
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
struct sk_buff_head *queue,
struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
struct ipcm_cookie *ipc, struct rtable **rtp,
struct inet_cork *cork, unsigned int flags);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
{
return (ipc->tos != -1) ? ((ipc->tos)&0x1E) : ((inet->tos)&0x1E);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
{
return (ipc->tos != -1) ? (((ipc->tos)&0x1E) | sock_flag(sk, SOCK_LOCALROUTE)) : (((inet_sk(sk)->tos)&0x1E) | sock_flag(sk, SOCK_LOCALROUTE));
}


int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);

struct ip_reply_arg {
struct kvec iov[1];
int flags;
__wsum csum;
int csumoffset;

int bound_dev_if;
u8 tos;
kuid_t uid;
};



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
return (arg->flags & 1) ? 0x01 : 0;
}

void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
const struct ip_options *sopt,
__be32 daddr, __be32 saddr,
const struct ip_reply_arg *arg,
unsigned int len, u64 transmit_time);
# 295 "./include/net/ip.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 snmp_get_cpu_field(void *mib, int cpu, int offt)
{
return *(((unsigned long *)({ do { const void *__vpp_verify = (typeof((mib) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((mib))) *)((mib))); (typeof((typeof(*((mib))) *)((mib)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })) + offt);
}

unsigned long snmp_fold_field(void *mib, int offt);





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 snmp_get_cpu_field64(void *mib, int cpu, int offct,
size_t syncp_offset)
{
return snmp_get_cpu_field(mib, cpu, offct);

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 snmp_fold_field64(void *mib, int offt, size_t syncp_off)
{
return snmp_fold_field(mib, offt);
}
# 342 "./include/net/ip.h"
void inet_get_local_port_range(struct net *net, int *low, int *high);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
if (!net->ipv4.sysctl_local_reserved_ports)
return false;
return arch_test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sysctl_dev_name_is_allowed(const char *name)
{
return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
return port < net->ipv4.sysctl_ip_prot_sock;
}
# 374 "./include/net/ip.h"
__be32 inet_current_timestamp(void);


extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;

void ipfrag_init(void);

void ip_static_sysctl_init(void);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ip_is_fragment(const struct iphdr *iph)
{
return (iph->frag_off & (( __be16)(__builtin_constant_p((__u16)((0x2000 | 0x1FFF))) ? ((__u16)( (((__u16)((0x2000 | 0x1FFF)) & (__u16)0x00ffU) << 8) | (((__u16)((0x2000 | 0x1FFF)) & (__u16)0xff00U) >> 8))) : __fswab16((0x2000 | 0x1FFF))))) != 0;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
int ip_decrease_ttl(struct iphdr *iph)
{
	u32 check = ( u32)iph->check;

	check += ( u32)(( __be16)(__builtin_constant_p((__u16)((0x0100))) ? ((__u16)( (((__u16)((0x0100)) & (__u16)0x00ffU) << 8) | (((__u16)((0x0100)) & (__u16)0xff00U) >> 8))) : __fswab16((0x0100))));
	iph->check = ( __sum16)(check + (check>=0xFFFF));
	return --iph->ttl;
}
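/*
 * Note: ip_decrease_ttl() updates the header checksum incrementally
 * (RFC 1141/1624 style) instead of recomputing it. Decrementing TTL by
 * one lowers the big-endian 16-bit word holding TTL/protocol by 0x0100,
 * so the one's-complement check field must grow by htons(0x0100), which
 * is the __builtin_constant_p/__fswab16 blob above, and
 * `check + (check >= 0xFFFF)` folds the end-around carry back into
 * 16 bits.
 */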

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ip_mtu_locked(const struct dst_entry *dst)
{
const struct rtable *rt = (const struct rtable *)dst;

return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
u8 pmtudisc = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_575(void) ; if (!((sizeof(inet_sk(sk)->pmtudisc) == sizeof(char) || sizeof(inet_sk(sk)->pmtudisc) == sizeof(short) || sizeof(inet_sk(sk)->pmtudisc) == sizeof(int) || sizeof(inet_sk(sk)->pmtudisc) == sizeof(long)) || sizeof(inet_sk(sk)->pmtudisc) == sizeof(long long))) __compiletime_assert_575(); } while (0); (*(const volatile typeof( _Generic((inet_sk(sk)->pmtudisc), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (inet_sk(sk)->pmtudisc))) *)&(inet_sk(sk)->pmtudisc)); });

return pmtudisc == 2 ||
(pmtudisc == 1 &&
!ip_mtu_locked(dst));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ip_sk_accept_pmtu(const struct sock *sk)
{
return inet_sk(sk)->pmtudisc != 4 &&
inet_sk(sk)->pmtudisc != 5;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ip_sk_use_pmtu(const struct sock *sk)
{
return inet_sk(sk)->pmtudisc < 3;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ip_sk_ignore_df(const struct sock *sk)
{
return inet_sk(sk)->pmtudisc < 2 ||
inet_sk(sk)->pmtudisc == 5;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
bool forwarding)
{
const struct rtable *rt = ({ void *__mptr = (void *)(dst); _Static_assert(__builtin_types_compatible_p(typeof(*(dst)), typeof(((struct rtable *)0)->dst)) || __builtin_types_compatible_p(typeof(*(dst)), typeof(void)), "pointer type mismatch in container_of()"); ((struct rtable *)(__mptr - __builtin_offsetof(struct rtable, dst))); });
struct net *net = dev_net(dst->dev);
unsigned int mtu;

if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
ip_mtu_locked(dst) ||
!forwarding) {
mtu = rt->rt_pmtu;
if (mtu && (({ unsigned long __dummy; typeof(rt->dst.expires) __dummy2; (void)(&__dummy == &__dummy2); 1; }) && ({ unsigned long __dummy; typeof(jiffies) __dummy2; (void)(&__dummy == &__dummy2); 1; }) && ((long)((jiffies) - (rt->dst.expires)) < 0)))
goto out;
}


mtu = dst_metric_raw(dst, RTAX_MTU);
if (mtu)
goto out;

mtu = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_576(void) ; if (!((sizeof(dst->dev->mtu) == sizeof(char) || sizeof(dst->dev->mtu) == sizeof(short) || sizeof(dst->dev->mtu) == sizeof(int) || sizeof(dst->dev->mtu) == sizeof(long)) || sizeof(dst->dev->mtu) == sizeof(long long))) __compiletime_assert_576(); } while (0); (*(const volatile typeof( _Generic((dst->dev->mtu), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (dst->dev->mtu))) *)&(dst->dev->mtu)); });

if (__builtin_expect(!!(ip_mtu_locked(dst)), 0)) {
if (rt->rt_uses_gateway && mtu > 576)
mtu = 576;
}

out:
mtu = __builtin_choose_expr(((!!(sizeof((typeof((unsigned int)(mtu)) *)1 == (typeof((unsigned int)(0xFFFFU)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((unsigned int)(mtu)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((unsigned int)(0xFFFFU)) * 0l)) : (int *)8))))), (((unsigned int)(mtu)) < ((unsigned int)(0xFFFFU)) ? ((unsigned int)(mtu)) : ((unsigned int)(0xFFFFU))), ({ typeof((unsigned int)(mtu)) __UNIQUE_ID___x577 = ((unsigned int)(mtu)); typeof((unsigned int)(0xFFFFU)) __UNIQUE_ID___y578 = ((unsigned int)(0xFFFFU)); ((__UNIQUE_ID___x577) < (__UNIQUE_ID___y578) ? (__UNIQUE_ID___x577) : (__UNIQUE_ID___y578)); }));

return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
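/*
 * Note: ip_dst_mtu_maybe_forward() prefers the learned path MTU when
 * the ip_fwd_use_pmtu sysctl is set, the route is MTU-locked, or this
 * is local output (!forwarding), then falls back to the route metric
 * and finally the device MTU; a locked route via a gateway is clamped
 * to the classic 576-byte minimum. The huge __builtin_choose_expr
 * expression at `out:` is just min(mtu, 0xFFFFU) (IP_MAX_MTU) with the
 * min() macro expanded, and the lwtunnel headroom is subtracted last.
 */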

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int ip_skb_dst_mtu(struct sock *sk,
const struct sk_buff *skb)
{
unsigned int mtu;

if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
bool forwarding = ((struct inet_skb_parm*)((skb)->cb))->flags & ((((1UL))) << (0));

return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
}

mtu = __builtin_choose_expr(((!!(sizeof((typeof(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_579(void) ; if (!((sizeof(skb_dst(skb)->dev->mtu) == sizeof(char) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(short) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(int) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(long)) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(long long))) __compiletime_assert_579(); } while (0); (*(const volatile typeof( _Generic((skb_dst(skb)->dev->mtu), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (skb_dst(skb)->dev->mtu))) *)&(skb_dst(skb)->dev->mtu)); })) *)1 == (typeof(0xFFFFU) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_579(void) ; if (!((sizeof(skb_dst(skb)->dev->mtu) == sizeof(char) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(short) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(int) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(long)) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(long long))) __compiletime_assert_579(); } while (0); (*(const volatile typeof( _Generic((skb_dst(skb)->dev->mtu), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (skb_dst(skb)->dev->mtu))) *)&(skb_dst(skb)->dev->mtu)); })) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(0xFFFFU) * 0l)) : (int *)8))))), ((({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_579(void) ; if (!((sizeof(skb_dst(skb)->dev->mtu) == sizeof(char) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(short) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(int) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(long)) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(long long))) __compiletime_assert_579(); } while (0); (*(const volatile typeof( _Generic((skb_dst(skb)->dev->mtu), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (skb_dst(skb)->dev->mtu))) *)&(skb_dst(skb)->dev->mtu)); })) < (0xFFFFU) ? 
(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_579(void) ; if (!((sizeof(skb_dst(skb)->dev->mtu) == sizeof(char) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(short) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(int) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(long)) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(long long))) __compiletime_assert_579(); } while (0); (*(const volatile typeof( _Generic((skb_dst(skb)->dev->mtu), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (skb_dst(skb)->dev->mtu))) *)&(skb_dst(skb)->dev->mtu)); })) : (0xFFFFU)), ({ typeof(({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_579(void) ; if (!((sizeof(skb_dst(skb)->dev->mtu) == sizeof(char) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(short) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(int) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(long)) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(long long))) __compiletime_assert_579(); } while (0); (*(const volatile typeof( _Generic((skb_dst(skb)->dev->mtu), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (skb_dst(skb)->dev->mtu))) *)&(skb_dst(skb)->dev->mtu)); })) __UNIQUE_ID___x580 = (({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_579(void) ; if (!((sizeof(skb_dst(skb)->dev->mtu) == sizeof(char) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(short) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(int) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(long)) || sizeof(skb_dst(skb)->dev->mtu) == sizeof(long long))) __compiletime_assert_579(); } while (0); (*(const volatile typeof( _Generic((skb_dst(skb)->dev->mtu), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (skb_dst(skb)->dev->mtu))) *)&(skb_dst(skb)->dev->mtu)); })); typeof(0xFFFFU) __UNIQUE_ID___y581 = (0xFFFFU); ((__UNIQUE_ID___x580) < (__UNIQUE_ID___y581) ? (__UNIQUE_ID___x580) : (__UNIQUE_ID___y581)); }));
return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
}

struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
int fc_mx_len,
struct netlink_ext_ack *extack);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
if (fib_metrics != &dst_default_metrics &&
refcount_dec_and_test(&fib_metrics->refcnt))
kfree(fib_metrics);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
{
dst_init_metrics(dst, fib_metrics->metrics, true);

if (fib_metrics != &dst_default_metrics) {
dst->_metrics |= 0x2UL;
refcount_inc(&fib_metrics->refcnt);
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
void ip_dst_metrics_put(struct dst_entry *dst)
{
struct dst_metrics *p = (struct dst_metrics *)((u32 *)(((dst)->_metrics) & ~0x3UL));

if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
kfree(p);
}

void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
struct sock *sk, int segs)
{
struct iphdr *iph = ip_hdr(skb);




if (sk && inet_sk(sk)->sk.__sk_common.skc_daddr) {
iph->id = (( __be16)(__builtin_constant_p((__u16)((inet_sk(sk)->inet_id))) ? ((__u16)( (((__u16)((inet_sk(sk)->inet_id)) & (__u16)0x00ffU) << 8) | (((__u16)((inet_sk(sk)->inet_id)) & (__u16)0xff00U) >> 8))) : __fswab16((inet_sk(sk)->inet_id))));
inet_sk(sk)->inet_id += segs;
return;
}
if ((iph->frag_off & (( __be16)(__builtin_constant_p((__u16)((0x4000))) ? ((__u16)( (((__u16)((0x4000)) & (__u16)0x00ffU) << 8) | (((__u16)((0x4000)) & (__u16)0xff00U) >> 8))) : __fswab16((0x4000))))) && !skb->ignore_df) {
iph->id = 0;
} else {

__ip_select_ident(net, iph, segs);
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip_select_ident(struct net *net, struct sk_buff *skb,
struct sock *sk)
{
ip_select_ident_segs(net, skb, sk, 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
skb->len, proto, 0);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
const struct iphdr *iph)
{
do { __attribute__((__noreturn__)) extern void __compiletime_assert_582(void) ; if (!(!(__builtin_offsetof(typeof(flow->addrs), v4addrs.dst) != __builtin_offsetof(typeof(flow->addrs), v4addrs.src) + sizeof(flow->addrs.v4addrs.src)))) __compiletime_assert_582(); } while (0);


memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip_eth_mc_map(__be32 naddr, char *buf)
{
	__u32 addr=(__builtin_constant_p((__u32)(( __u32)(__be32)(naddr))) ? ((__u32)( (((__u32)(( __u32)(__be32)(naddr)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(naddr)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(naddr)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(naddr)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(naddr)));
	buf[0]=0x01;
	buf[1]=0x00;
	buf[2]=0x5e;
	buf[5]=addr&0xFF;
	addr>>=8;
	buf[4]=addr&0xFF;
	addr>>=8;
	buf[3]=addr&0x7F;
}
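/*
 * Note: ip_eth_mc_map() is the RFC 1112 IPv4-to-Ethernet multicast
 * mapping: the MAC prefix is fixed at 01:00:5e and the low 23 bits of
 * the (byte-swapped) group address fill the remaining bytes, which is
 * why buf[3] is masked with 0x7F. Distinct groups that share those
 * 23 bits map to the same MAC address.
 */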






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
__u32 addr;
unsigned char scope = broadcast[5] & 0xF;

buf[0] = 0;
buf[1] = 0xff;
buf[2] = 0xff;
buf[3] = 0xff;
addr = (__builtin_constant_p((__u32)(( __u32)(__be32)(naddr))) ? ((__u32)( (((__u32)(( __u32)(__be32)(naddr)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(naddr)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(naddr)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(naddr)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(naddr)));
buf[4] = 0xff;
buf[5] = 0x10 | scope;
buf[6] = 0x40;
buf[7] = 0x1b;
buf[8] = broadcast[8];
buf[9] = broadcast[9];
buf[10] = 0;
buf[11] = 0;
buf[12] = 0;
buf[13] = 0;
buf[14] = 0;
buf[15] = 0;
buf[19] = addr & 0xff;
addr >>= 8;
buf[18] = addr & 0xff;
addr >>= 8;
buf[17] = addr & 0xff;
addr >>= 8;
buf[16] = addr & 0x0f;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
memcpy(buf, broadcast, 4);
else
memcpy(buf, &naddr, sizeof(naddr));
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void inet_reset_saddr(struct sock *sk)
{
inet_sk(sk)->sk.__sk_common.skc_rcv_saddr = inet_sk(sk)->inet_saddr = 0;

if (sk->__sk_common.skc_family == 10) {
struct ipv6_pinfo *np = inet6_sk(sk);

memset(&np->saddr, 0, sizeof(np->saddr));
memset(&sk->__sk_common.skc_v6_rcv_saddr, 0, sizeof(sk->__sk_common.skc_v6_rcv_saddr));
}

}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int ipv4_addr_hash(__be32 ip)
{
return ( unsigned int) ip;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 ipv4_portaddr_hash(const struct net *net,
__be32 saddr,
unsigned int port)
{
return jhash_1word(( u32)saddr, net_hash_mix(net)) ^ port;
}

bool ip_call_ra_chain(struct sk_buff *skb);





enum ip_defrag_users {
IP_DEFRAG_LOCAL_DELIVER,
IP_DEFRAG_CALL_RA_CHAIN,
IP_DEFRAG_CONNTRACK_IN,
__IP_DEFRAG_CONNTRACK_IN_END = IP_DEFRAG_CONNTRACK_IN + ((unsigned short)~0U),
IP_DEFRAG_CONNTRACK_OUT,
__IP_DEFRAG_CONNTRACK_OUT_END = IP_DEFRAG_CONNTRACK_OUT + ((unsigned short)~0U),
IP_DEFRAG_CONNTRACK_BRIDGE_IN,
__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + ((unsigned short)~0U),
IP_DEFRAG_VS_IN,
IP_DEFRAG_VS_OUT,
IP_DEFRAG_VS_FWD,
IP_DEFRAG_AF_PACKET,
IP_DEFRAG_MACVLAN,
};




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ip_defrag_user_in_between(u32 user,
enum ip_defrag_users lower_bound,
enum ip_defrag_users upper_bound)
{
return user >= lower_bound && user <= upper_bound;
}

int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);

struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
# 707 "./include/net/ip.h"
int ip_forward(struct sk_buff *skb);





void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
__be32 daddr, struct rtable *rt);

int __ip_options_echo(struct net *net, struct ip_options *dopt,
struct sk_buff *skb, const struct ip_options *sopt);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ip_options_echo(struct net *net, struct ip_options *dopt,
struct sk_buff *skb)
{
return __ip_options_echo(net, dopt, skb, &((struct inet_skb_parm*)((skb)->cb))->opt);
}

void ip_options_fragment(struct sk_buff *skb);
int __ip_options_compile(struct net *net, struct ip_options *opt,
struct sk_buff *skb, __be32 *info);
int ip_options_compile(struct net *net, struct ip_options *opt,
struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
sockptr_t data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);





void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
struct ipcm_cookie *ipc, bool allow_ipv6);
extern struct static_key_false ip4_min_ttl;
int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char *optval,
int *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
u32 info);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}

bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;


int ip_misc_proc_init(void);


int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
struct netlink_ext_ack *extack);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool inetdev_valid_mtu(unsigned int mtu)
{
return __builtin_expect(!!(mtu >= 68), 1);
}

void ip_sock_set_freebind(struct sock *sk);
int ip_sock_set_mtu_discover(struct sock *sk, int val);
void ip_sock_set_pktinfo(struct sock *sk);
void ip_sock_set_recverr(struct sock *sk);
void ip_sock_set_tos(struct sock *sk, int val);
void __ip_sock_set_tos(struct sock *sk, int val);
# 39 "./include/net/tcp.h" 2

# 1 "./include/net/inet_ecn.h" 1
# 10 "./include/net/inet_ecn.h"
# 1 "./include/net/dsfield.h" 1
# 16 "./include/net/dsfield.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u8 ipv4_get_dsfield(const struct iphdr *iph)
{
return iph->tos;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u8 ipv6_get_dsfield(const struct ipv6hdr *ipv6h)
{
return (__builtin_constant_p((__u16)(( __u16)(__be16)(*( const __be16 *)ipv6h))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*( const __be16 *)ipv6h)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*( const __be16 *)ipv6h)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*( const __be16 *)ipv6h))) >> 4;
}


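/*
 * ipv4_change_dsfield(): rewrite the TOS/DS byte in place and patch
 * the header checksum incrementally (add the old byte, subtract the
 * new one, fold the carry) so iph->check stays valid without a full
 * recompute.
 */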
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ipv4_change_dsfield(struct iphdr *iph,__u8 mask,
__u8 value)
{
__u32 check = (__builtin_constant_p((__u16)(( __u16)(__be16)(( __be16)iph->check))) ? ((__u16)( (((__u16)(( __u16)(__be16)(( __be16)iph->check)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(( __be16)iph->check)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(( __be16)iph->check)));
__u8 dsfield;

dsfield = (iph->tos & mask) | value;
check += iph->tos;
if ((check+1) >> 16) check = (check+1) & 0xffff;
check -= dsfield;
check += check >> 16;
iph->check = ( __sum16)(( __be16)(__builtin_constant_p((__u16)((check))) ? ((__u16)( (((__u16)((check)) & (__u16)0x00ffU) << 8) | (((__u16)((check)) & (__u16)0xff00U) >> 8))) : __fswab16((check))));
iph->tos = dsfield;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ipv6_change_dsfield(struct ipv6hdr *ipv6h,__u8 mask,
__u8 value)
{
__be16 *p = ( __be16 *)ipv6h;

*p = (*p & (( __be16)(__builtin_constant_p((__u16)(((((u16)mask << 4) | 0xf00f)))) ? ((__u16)( (((__u16)(((((u16)mask << 4) | 0xf00f))) & (__u16)0x00ffU) << 8) | (((__u16)(((((u16)mask << 4) | 0xf00f))) & (__u16)0xff00U) >> 8))) : __fswab16(((((u16)mask << 4) | 0xf00f)))))) | (( __be16)(__builtin_constant_p((__u16)(((u16)value << 4))) ? ((__u16)( (((__u16)(((u16)value << 4)) & (__u16)0x00ffU) << 8) | (((__u16)(((u16)value << 4)) & (__u16)0xff00U) >> 8))) : __fswab16(((u16)value << 4))));
}
# 11 "./include/net/inet_ecn.h" 2


enum {
INET_ECN_NOT_ECT = 0,
INET_ECN_ECT_1 = 1,
INET_ECN_ECT_0 = 2,
INET_ECN_CE = 3,
INET_ECN_MASK = 3,
};

extern int sysctl_tunnel_ecn_log;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int INET_ECN_is_ce(__u8 dsfield)
{
return (dsfield & INET_ECN_MASK) == INET_ECN_CE;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int INET_ECN_is_not_ect(__u8 dsfield)
{
return (dsfield & INET_ECN_MASK) == INET_ECN_NOT_ECT;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int INET_ECN_is_capable(__u8 dsfield)
{
return dsfield & INET_ECN_ECT_0;
}
# 46 "./include/net/inet_ecn.h"
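/*
 * INET_ECN_encapsulate(): copy the inner ECN field into the outer
 * header, mapping inner CE to ECT(0) so a congestion mark cannot be
 * spoofed outward onto the tunnel (RFC 6040 normal mode).
 */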
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
{
outer &= ~INET_ECN_MASK;
outer |= !INET_ECN_is_ce(inner) ? (inner & INET_ECN_MASK) :
INET_ECN_ECT_0;
return outer;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void INET_ECN_xmit(struct sock *sk)
{
inet_sk(sk)->tos |= INET_ECN_ECT_0;
if (inet6_sk(sk) != ((void *)0))
inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void INET_ECN_dontxmit(struct sock *sk)
{
inet_sk(sk)->tos &= ~INET_ECN_MASK;
if (inet6_sk(sk) != ((void *)0))
inet6_sk(sk)->tclass &= ~INET_ECN_MASK;
}
# 77 "./include/net/inet_ecn.h"
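/*
 * IP_ECN_set_ce(): set Congestion Experienced on an IPv4 header.
 * (tos + 1) & 3 maps Not-ECT to 1 and CE to 0, which the early
 * return turns into 0 (not ECN-capable) and 1 (already CE); the
 * ECT(0)/ECT(1) cases fall through to have CE set, with the header
 * checksum patched via csum16_add().
 */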
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int IP_ECN_set_ce(struct iphdr *iph)
{
u32 ecn = (iph->tos + 1) & INET_ECN_MASK;
__be16 check_add;
# 89 "./include/net/inet_ecn.h"
if (!(ecn & 2))
return !ecn;






check_add = ( __be16)(( u16)(( __be16)(__builtin_constant_p((__u16)((0xFFFB))) ? ((__u16)( (((__u16)((0xFFFB)) & (__u16)0x00ffU) << 8) | (((__u16)((0xFFFB)) & (__u16)0xff00U) >> 8))) : __fswab16((0xFFFB)))) +
( u16)(( __be16)(__builtin_constant_p((__u16)((ecn))) ? ((__u16)( (((__u16)((ecn)) & (__u16)0x00ffU) << 8) | (((__u16)((ecn)) & (__u16)0xff00U) >> 8))) : __fswab16((ecn)))));

iph->check = csum16_add(iph->check, check_add);
iph->tos |= INET_ECN_CE;
return 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int IP_ECN_set_ect1(struct iphdr *iph)
{
if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0)
return 0;

iph->check = csum16_add(iph->check, (( __be16)(__builtin_constant_p((__u16)((0x1))) ? ((__u16)( (((__u16)((0x1)) & (__u16)0x00ffU) << 8) | (((__u16)((0x1)) & (__u16)0xff00U) >> 8))) : __fswab16((0x1)))));
iph->tos ^= INET_ECN_MASK;
return 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void IP_ECN_clear(struct iphdr *iph)
{
iph->tos &= ~INET_ECN_MASK;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ipv4_copy_dscp(unsigned int dscp, struct iphdr *inner)
{
dscp &= ~INET_ECN_MASK;
ipv4_change_dsfield(inner, INET_ECN_MASK, dscp);
}

struct ipv6hdr;







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
{
__be32 from, to;

if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph)))
return 0;

from = *(__be32 *)iph;
to = from | (( __be32)(__builtin_constant_p((__u32)((INET_ECN_CE << 20))) ? ((__u32)( (((__u32)((INET_ECN_CE << 20)) & (__u32)0x000000ffUL) << 24) | (((__u32)((INET_ECN_CE << 20)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((INET_ECN_CE << 20)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((INET_ECN_CE << 20)) & (__u32)0xff000000UL) >> 24))) : __fswab32((INET_ECN_CE << 20))));
*(__be32 *)iph = to;
if (skb->ip_summed == 2)
skb->csum = csum_add(csum_sub(skb->csum, ( __wsum)from),
( __wsum)to);
return 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int IP6_ECN_set_ect1(struct sk_buff *skb, struct ipv6hdr *iph)
{
__be32 from, to;

if ((ipv6_get_dsfield(iph) & INET_ECN_MASK) != INET_ECN_ECT_0)
return 0;

from = *(__be32 *)iph;
to = from ^ (( __be32)(__builtin_constant_p((__u32)((INET_ECN_MASK << 20))) ? ((__u32)( (((__u32)((INET_ECN_MASK << 20)) & (__u32)0x000000ffUL) << 24) | (((__u32)((INET_ECN_MASK << 20)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((INET_ECN_MASK << 20)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((INET_ECN_MASK << 20)) & (__u32)0xff000000UL) >> 24))) : __fswab32((INET_ECN_MASK << 20))));
*(__be32 *)iph = to;
if (skb->ip_summed == 2)
skb->csum = csum_add(csum_sub(skb->csum, ( __wsum)from),
( __wsum)to);
return 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
{
dscp &= ~INET_ECN_MASK;
ipv6_change_dsfield(inner, INET_ECN_MASK, dscp);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int INET_ECN_set_ce(struct sk_buff *skb)
{
switch (skb_protocol(skb, true)) {
case (( __be16)(__builtin_constant_p((__u16)((0x0800))) ? ((__u16)( (((__u16)((0x0800)) & (__u16)0x00ffU) << 8) | (((__u16)((0x0800)) & (__u16)0xff00U) >> 8))) : __fswab16((0x0800)))):
if (skb_network_header(skb) + sizeof(struct iphdr) <=
skb_tail_pointer(skb))
return IP_ECN_set_ce(ip_hdr(skb));
break;

case (( __be16)(__builtin_constant_p((__u16)((0x86DD))) ? ((__u16)( (((__u16)((0x86DD)) & (__u16)0x00ffU) << 8) | (((__u16)((0x86DD)) & (__u16)0xff00U) >> 8))) : __fswab16((0x86DD)))):
if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
skb_tail_pointer(skb))
return IP6_ECN_set_ce(skb, ipv6_hdr(skb));
break;
}

return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_get_dsfield(struct sk_buff *skb)
{
switch (skb_protocol(skb, true)) {
case (( __be16)(__builtin_constant_p((__u16)((0x0800))) ? ((__u16)( (((__u16)((0x0800)) & (__u16)0x00ffU) << 8) | (((__u16)((0x0800)) & (__u16)0xff00U) >> 8))) : __fswab16((0x0800)))):
if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
break;
return ipv4_get_dsfield(ip_hdr(skb));

case (( __be16)(__builtin_constant_p((__u16)((0x86DD))) ? ((__u16)( (((__u16)((0x86DD)) & (__u16)0x00ffU) << 8) | (((__u16)((0x86DD)) & (__u16)0xff00U) >> 8))) : __fswab16((0x86DD)))):
if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
break;
return ipv6_get_dsfield(ipv6_hdr(skb));
}

return -1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int INET_ECN_set_ect1(struct sk_buff *skb)
{
switch (skb_protocol(skb, true)) {
case (( __be16)(__builtin_constant_p((__u16)((0x0800))) ? ((__u16)( (((__u16)((0x0800)) & (__u16)0x00ffU) << 8) | (((__u16)((0x0800)) & (__u16)0xff00U) >> 8))) : __fswab16((0x0800)))):
if (skb_network_header(skb) + sizeof(struct iphdr) <=
skb_tail_pointer(skb))
return IP_ECN_set_ect1(ip_hdr(skb));
break;

case (( __be16)(__builtin_constant_p((__u16)((0x86DD))) ? ((__u16)( (((__u16)((0x86DD)) & (__u16)0x00ffU) << 8) | (((__u16)((0x86DD)) & (__u16)0xff00U) >> 8))) : __fswab16((0x86DD)))):
if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
skb_tail_pointer(skb))
return IP6_ECN_set_ect1(skb, ipv6_hdr(skb));
break;
}

return 0;
}
# 251 "./include/net/inet_ecn.h"
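/*
 * __INET_ECN_decapsulate(): validate the outer/inner ECN combination
 * on decapsulation (RFC 6040): returns 0 on success (with *set_ce
 * set when CE must be propagated to the inner header), 1 if the
 * combination should be logged, 2 if the packet should be dropped.
 */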
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __INET_ECN_decapsulate(__u8 outer, __u8 inner, bool *set_ce)
{
if (INET_ECN_is_not_ect(inner)) {
switch (outer & INET_ECN_MASK) {
case INET_ECN_NOT_ECT:
return 0;
case INET_ECN_ECT_0:
case INET_ECN_ECT_1:
return 1;
case INET_ECN_CE:
return 2;
}
}

*set_ce = INET_ECN_is_ce(outer);
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int INET_ECN_decapsulate(struct sk_buff *skb,
__u8 outer, __u8 inner)
{
bool set_ce = false;
int rc;

rc = __INET_ECN_decapsulate(outer, inner, &set_ce);
if (!rc) {
if (set_ce)
INET_ECN_set_ce(skb);
else if ((outer & INET_ECN_MASK) == INET_ECN_ECT_1)
INET_ECN_set_ect1(skb);
}

return rc;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int IP_ECN_decapsulate(const struct iphdr *oiph,
struct sk_buff *skb)
{
__u8 inner;

switch (skb_protocol(skb, true)) {
case (( __be16)(__builtin_constant_p((__u16)((0x0800))) ? ((__u16)( (((__u16)((0x0800)) & (__u16)0x00ffU) << 8) | (((__u16)((0x0800)) & (__u16)0xff00U) >> 8))) : __fswab16((0x0800)))):
inner = ip_hdr(skb)->tos;
break;
case (( __be16)(__builtin_constant_p((__u16)((0x86DD))) ? ((__u16)( (((__u16)((0x86DD)) & (__u16)0x00ffU) << 8) | (((__u16)((0x86DD)) & (__u16)0xff00U) >> 8))) : __fswab16((0x86DD)))):
inner = ipv6_get_dsfield(ipv6_hdr(skb));
break;
default:
return 0;
}

return INET_ECN_decapsulate(skb, oiph->tos, inner);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h,
struct sk_buff *skb)
{
__u8 inner;

switch (skb_protocol(skb, true)) {
case (( __be16)(__builtin_constant_p((__u16)((0x0800))) ? ((__u16)( (((__u16)((0x0800)) & (__u16)0x00ffU) << 8) | (((__u16)((0x0800)) & (__u16)0xff00U) >> 8))) : __fswab16((0x0800)))):
inner = ip_hdr(skb)->tos;
break;
case (( __be16)(__builtin_constant_p((__u16)((0x86DD))) ? ((__u16)( (((__u16)((0x86DD)) & (__u16)0x00ffU) << 8) | (((__u16)((0x86DD)) & (__u16)0xff00U) >> 8))) : __fswab16((0x86DD)))):
inner = ipv6_get_dsfield(ipv6_hdr(skb));
break;
default:
return 0;
}

return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner);
}
# 41 "./include/net/tcp.h" 2

# 1 "./include/net/mptcp.h" 1
# 15 "./include/net/mptcp.h"
struct mptcp_info;
struct mptcp_sock;
struct seq_file;


struct mptcp_ext {
union {
u64 data_ack;
u32 data_ack32;
};
u64 data_seq;
u32 subflow_seq;
u16 data_len;
__sum16 csum;
u8 use_map:1,
dsn64:1,
data_fin:1,
use_ack:1,
ack64:1,
mpc_map:1,
frozen:1,
reset_transient:1;
u8 reset_reason:4,
csum_reqd:1;
};



struct mptcp_rm_list {
u8 ids[8];
u8 nr;
};

struct mptcp_addr_info {
u8 id;
sa_family_t family;
__be16 port;
union {
struct in_addr addr;



};
};

struct mptcp_out_options {
# 95 "./include/net/mptcp.h"
};
# 201 "./include/net/mptcp.h"
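/*
 * CONFIG_MPTCP appears to be disabled in this configuration: the
 * hooks below are the stub variants that compile to constants and
 * no-ops, so plain TCP pays nothing for them.
 */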
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mptcp_init(void)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool sk_is_mptcp(const struct sock *sk)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool rsk_is_mptcp(const struct request_sock *req)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool rsk_drop_req(const struct request_sock *req)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
unsigned int *size,
struct mptcp_out_options *opts)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mptcp_synack_options(const struct request_sock *req,
unsigned int *size,
struct mptcp_out_options *opts)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mptcp_established_options(struct sock *sk,
struct sk_buff *skb,
unsigned int *size,
unsigned int remaining,
struct mptcp_out_options *opts)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mptcp_incoming_options(struct sock *sk,
struct sk_buff *skb)
{
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mptcp_skb_ext_move(struct sk_buff *to,
const struct sk_buff *from)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mptcp_skb_ext_copy(struct sk_buff *to,
struct sk_buff *from)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool mptcp_skb_can_collapse(const struct sk_buff *to,
const struct sk_buff *from)
{
return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mptcp_space(const struct sock *ssk, int *s, int *fs) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mptcp_seq_show(struct seq_file *seq) { }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mptcp_subflow_init_cookie_req(struct request_sock *req,
const struct sock *sk_listener,
struct sk_buff *skb)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be32 mptcp_reset_option(const struct sk_buff *skb) { return (( __be32)(__builtin_constant_p((__u32)((0u))) ? ((__u32)( (((__u32)((0u)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0u)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0u)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0u)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0u)))); }






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int mptcpv6_init(void) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void mptcpv6_handle_mapped(struct sock *sk, bool mapped) { }
# 43 "./include/net/tcp.h" 2



# 1 "./include/linux/bpf-cgroup.h" 1
# 14 "./include/linux/bpf-cgroup.h"
struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;






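/*
 * to_cgroup_bpf_attach_type(): translate the UAPI bpf_attach_type
 * into the kernel-internal cgroup array index, or
 * CGROUP_BPF_ATTACH_TYPE_INVALID for non-cgroup attach types.
 */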
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) enum cgroup_bpf_attach_type
to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
{
switch (attach_type) {
case BPF_CGROUP_INET_INGRESS: return CGROUP_INET_INGRESS;
case BPF_CGROUP_INET_EGRESS: return CGROUP_INET_EGRESS;
case BPF_CGROUP_INET_SOCK_CREATE: return CGROUP_INET_SOCK_CREATE;
case BPF_CGROUP_SOCK_OPS: return CGROUP_SOCK_OPS;
case BPF_CGROUP_DEVICE: return CGROUP_DEVICE;
case BPF_CGROUP_INET4_BIND: return CGROUP_INET4_BIND;
case BPF_CGROUP_INET6_BIND: return CGROUP_INET6_BIND;
case BPF_CGROUP_INET4_CONNECT: return CGROUP_INET4_CONNECT;
case BPF_CGROUP_INET6_CONNECT: return CGROUP_INET6_CONNECT;
case BPF_CGROUP_INET4_POST_BIND: return CGROUP_INET4_POST_BIND;
case BPF_CGROUP_INET6_POST_BIND: return CGROUP_INET6_POST_BIND;
case BPF_CGROUP_UDP4_SENDMSG: return CGROUP_UDP4_SENDMSG;
case BPF_CGROUP_UDP6_SENDMSG: return CGROUP_UDP6_SENDMSG;
case BPF_CGROUP_SYSCTL: return CGROUP_SYSCTL;
case BPF_CGROUP_UDP4_RECVMSG: return CGROUP_UDP4_RECVMSG;
case BPF_CGROUP_UDP6_RECVMSG: return CGROUP_UDP6_RECVMSG;
case BPF_CGROUP_GETSOCKOPT: return CGROUP_GETSOCKOPT;
case BPF_CGROUP_SETSOCKOPT: return CGROUP_SETSOCKOPT;
case BPF_CGROUP_INET4_GETPEERNAME: return CGROUP_INET4_GETPEERNAME;
case BPF_CGROUP_INET6_GETPEERNAME: return CGROUP_INET6_GETPEERNAME;
case BPF_CGROUP_INET4_GETSOCKNAME: return CGROUP_INET4_GETSOCKNAME;
case BPF_CGROUP_INET6_GETSOCKNAME: return CGROUP_INET6_GETSOCKNAME;
case BPF_CGROUP_INET_SOCK_RELEASE: return CGROUP_INET_SOCK_RELEASE;
default:
return CGROUP_BPF_ATTACH_TYPE_INVALID;
}
}



extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];





struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
struct callback_head rcu;
char data[];
};

struct bpf_cgroup_storage {
union {
struct bpf_storage_buffer *buf;
void *percpu_buf;
};
struct bpf_cgroup_storage_map *map;
struct bpf_cgroup_storage_key key;
struct list_head list_map;
struct list_head list_cg;
struct rb_node node;
struct callback_head rcu;
};

struct bpf_cgroup_link {
struct bpf_link link;
struct cgroup *cgroup;
enum bpf_attach_type type;
};

struct bpf_prog_list {
struct list_head node;
struct bpf_prog *prog;
struct bpf_cgroup_link *link;
struct bpf_cgroup_storage *storage[__BPF_CGROUP_STORAGE_MAX];
};

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
struct sk_buff *skb,
enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
struct sockaddr *uaddr,
enum cgroup_bpf_attach_type atype,
void *t_ctx,
u32 *flags);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
struct bpf_sock_ops_kern *sock_ops,
enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
short access, enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
struct ctl_table *table, int write,
char **buf, size_t *pcount, loff_t *ppos,
enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
int *optname, char *optval,
int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
int optname, char *optval,
int *optlen, int max_optlen,
int retval);

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
int optname, void *optval,
int *optlen, int retval);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) enum bpf_cgroup_storage_type cgroup_storage_type(
struct bpf_map *map)
{
if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
return BPF_CGROUP_STORAGE_PERCPU;

return BPF_CGROUP_STORAGE_SHARED;
}

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
struct cgroup *cgroup,
enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
void *value, u64 flags);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cgroup_bpf_sock_enabled(struct sock *sk,
enum cgroup_bpf_attach_type type)
{
struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
struct bpf_prog_array *array;

array = ({ typeof(*(cgrp->bpf.effective[type])) *__UNIQUE_ID_rcu583 = (typeof(*(cgrp->bpf.effective[type])) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_584(void) ; if (!((sizeof((cgrp->bpf.effective[type])) == sizeof(char) || sizeof((cgrp->bpf.effective[type])) == sizeof(short) || sizeof((cgrp->bpf.effective[type])) == sizeof(int) || sizeof((cgrp->bpf.effective[type])) == sizeof(long)) || sizeof((cgrp->bpf.effective[type])) == sizeof(long long))) __compiletime_assert_584(); } while (0); (*(const volatile typeof( _Generic(((cgrp->bpf.effective[type])), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((cgrp->bpf.effective[type])))) *)&((cgrp->bpf.effective[type]))); }); ; ((typeof(*(cgrp->bpf.effective[type])) *)(__UNIQUE_ID_rcu583)); });
return array != &bpf_empty_prog_array.hdr;
}
# 407 "./include/linux/bpf-cgroup.h"
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
union bpf_attr *uattr);
# 47 "./include/net/tcp.h" 2


extern struct inet_hashinfo tcp_hashinfo;

extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned int) tcp_orphan_count;
int tcp_orphan_count_sum(void);

void tcp_time_wait(struct sock *sk, int state, int timeo);
# 248 "./include/net/tcp.h"
extern int sysctl_tcp_max_orphans;
extern long sysctl_tcp_mem[3];





extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;


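/*
 * The dead "0 &&" arm below is most likely mem_cgroup_sockets_enabled
 * expanded with memory cgroups disabled, leaving only the READ_ONCE()
 * of the global tcp_memory_pressure flag.
 */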
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_under_memory_pressure(const struct sock *sk)
{
if (0 && sk->sk_memcg &&
mem_cgroup_under_socket_pressure(sk->sk_memcg))
return true;

return ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_585(void) ; if (!((sizeof(tcp_memory_pressure) == sizeof(char) || sizeof(tcp_memory_pressure) == sizeof(short) || sizeof(tcp_memory_pressure) == sizeof(int) || sizeof(tcp_memory_pressure) == sizeof(long)) || sizeof(tcp_memory_pressure) == sizeof(long long))) __compiletime_assert_585(); } while (0); (*(const volatile typeof( _Generic((tcp_memory_pressure), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (tcp_memory_pressure))) *)&(tcp_memory_pressure)); });
}





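/*
 * before(): wrap-safe TCP sequence comparison. The signed 32-bit
 * difference handles wraparound, so before(0xfffffff0, 0x10) is
 * true even though 0xfffffff0 > 0x10 as plain integers.
 */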
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool before(__u32 seq1, __u32 seq2)
{
return (__s32)(seq1-seq2) < 0;
}



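/*
 * between(): true iff seq1 lies in [seq2, seq3] modulo 2^32, using
 * the same unsigned-difference trick.
 */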
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
return seq3 - seq2 >= seq1 - seq2;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_out_of_memory(struct sock *sk)
{
if (sk->sk_wmem_queued > ((2048 + ((((sizeof(struct sk_buff))) + ((typeof((sizeof(struct sk_buff))))(((1 << 6))) - 1)) & ~((typeof((sizeof(struct sk_buff))))(((1 << 6))) - 1))) * 2) &&
sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
return true;
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
sk_wmem_queued_add(sk, -skb->truesize);
if (!skb_zcopy_pure(skb))
sk_mem_uncharge(sk, skb->truesize);
else
sk_mem_uncharge(sk, ((skb_end_offset(skb)) + ((((sizeof(struct sk_buff))) + ((typeof((sizeof(struct sk_buff))))(((1 << 6))) - 1)) & ~((typeof((sizeof(struct sk_buff))))(((1 << 6))) - 1)) + ((((sizeof(struct skb_shared_info))) + ((typeof((sizeof(struct skb_shared_info))))(((1 << 6))) - 1)) & ~((typeof((sizeof(struct skb_shared_info))))(((1 << 6))) - 1))));
__kfree_skb(skb);
}

void sk_forced_mem_schedule(struct sock *sk, int size);

bool tcp_check_oom(struct sock *sk, int shift);


extern struct proto tcp_prot;






void tcp_tasklet_init(void);

int tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

void tcp_remove_empty_skb(struct sock *sk);
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
int flags);
int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
int size_goal);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
bool force_schedule);

void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_dec_quickack_mode(struct sock *sk,
const unsigned int pkts)
{
struct inet_connection_sock *icsk = inet_csk(sk);

if (icsk->icsk_ack.quick) {
if (pkts >= icsk->icsk_ack.quick) {
icsk->icsk_ack.quick = 0;

icsk->icsk_ack.ato = ((unsigned)(100/25));
} else
icsk->icsk_ack.quick -= pkts;
}
}






enum tcp_tw_status {
TCP_TW_SUCCESS = 0,
TCP_TW_RST = 1,
TCP_TW_ACK = 2,
TCP_TW_SYN = 3
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
struct sk_buff *skb,
const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock *req, bool fastopen,
bool *lost_race);
int tcp_child_process(struct sock *parent, struct sock *child,
struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void __tcp_close(struct sock *sk, long timeout);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
__poll_t tcp_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
char *optval, int *optlen);
bool tcp_bpf_bypass_getsockopt(int level, int optname);
int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
int flags, int *addr_len);
int tcp_set_rcvlowat(struct sock *sk, int val);
int tcp_set_window_clamp(struct sock *sk, int val);
void tcp_update_recv_tstamps(struct sk_buff *skb,
struct scm_timestamping_internal *tss);
void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
struct scm_timestamping_internal *tss);
void tcp_data_ready(struct sock *sk);

int tcp_mmap(struct file *file, struct socket *sock,
struct vm_area_struct *vma);

void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
struct tcp_options_received *opt_rx,
int estab, struct tcp_fastopen_cookie *foc);
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);




u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
struct tcphdr *th, u32 *cookie);
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
struct tcphdr *th, u32 *cookie);
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
const struct tcp_request_sock_ops *af_ops,
struct sock *sk, struct tcphdr *th);




void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
struct request_sock *req,
struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst,
struct request_sock *req_unhash,
bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
TCP_SYNACK_NORMAL,
TCP_SYNACK_FASTOPEN,
TCP_SYNACK_COOKIE,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
struct tcp_fastopen_cookie *foc,
enum tcp_synack_type synack_type,
struct sk_buff *syn_skb);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);


struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst, u32 tsoff);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
const struct tcp_request_sock_ops *af_ops,
struct sock *sk, struct sk_buff *skb);
# 584 "./include/net/tcp.h"
void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
enum tcp_queue {
TCP_FRAG_IN_WRITE_QUEUE,
TCP_FRAG_IN_RTX_QUEUE,
};
int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
struct sk_buff *skb, u32 len,
unsigned int mss_now, gfp_t gfp);

void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
const struct sk_buff *next_skb);


void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
void tcp_check_space(struct sock *sk);


void tcp_init_xmit_timers(struct sock *);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_clear_xmit_timers(struct sock *sk)
{
if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
__sock_put(sk);

if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
__sock_put(sk);

inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);


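/*
 * tcp_bound_to_half_wnd(): to avoid silly-window deadlocks, cap a
 * packet at half the largest window the peer has ever advertised
 * (the whole window if that is <= 536 bytes), but never below
 * 68 - tcp_header_len; the expanded blob is the max_t(int, ...)
 * macro over those two values.
 */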
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
int cutoff;
# 655 "./include/net/tcp.h"
if (tp->max_window > 536U)
cutoff = (tp->max_window >> 1);
else
cutoff = tp->max_window;

if (cutoff && pktsize > cutoff)
return __builtin_choose_expr(((!!(sizeof((typeof((int)(cutoff)) *)1 == (typeof((int)(68U - tp->tcp_header_len)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(cutoff)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(68U - tp->tcp_header_len)) * 0l)) : (int *)8))))), (((int)(cutoff)) > ((int)(68U - tp->tcp_header_len)) ? ((int)(cutoff)) : ((int)(68U - tp->tcp_header_len))), ({ typeof((int)(cutoff)) __UNIQUE_ID___x586 = ((int)(cutoff)); typeof((int)(68U - tp->tcp_header_len)) __UNIQUE_ID___y587 = ((int)(68U - tp->tcp_header_len)); ((__UNIQUE_ID___x586) > (__UNIQUE_ID___y587) ? (__UNIQUE_ID___x586) : (__UNIQUE_ID___y587)); }));
else
return pktsize;
}


void tcp_get_info(struct sock *, struct tcp_info *);


int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_bound_rto(const struct sock *sk)
{
if (inet_csk(sk)->icsk_rto > ((unsigned)(120*100)))
inet_csk(sk)->icsk_rto = ((unsigned)(120*100));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 __tcp_set_rto(const struct tcp_sock *tp)
{
return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{

if (sk_is_mptcp((struct sock *)tp))
return;

tp->pred_flags = (( __be32)(__builtin_constant_p((__u32)(((tp->tcp_header_len << 26) | (__builtin_constant_p((__u32)(( __u32)(__be32)(TCP_FLAG_ACK))) ? ((__u32)( (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(TCP_FLAG_ACK))) | snd_wnd))) ? ((__u32)( (((__u32)(((tp->tcp_header_len << 26) | (__builtin_constant_p((__u32)(( __u32)(__be32)(TCP_FLAG_ACK))) ? ((__u32)( (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(TCP_FLAG_ACK))) | snd_wnd)) & (__u32)0x000000ffUL) << 24) | (((__u32)(((tp->tcp_header_len << 26) | (__builtin_constant_p((__u32)(( __u32)(__be32)(TCP_FLAG_ACK))) ? ((__u32)( (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(TCP_FLAG_ACK))) | snd_wnd)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(((tp->tcp_header_len << 26) | (__builtin_constant_p((__u32)(( __u32)(__be32)(TCP_FLAG_ACK))) ? ((__u32)( (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(TCP_FLAG_ACK))) | snd_wnd)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(((tp->tcp_header_len << 26) | (__builtin_constant_p((__u32)(( __u32)(__be32)(TCP_FLAG_ACK))) ? ((__u32)( (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(TCP_FLAG_ACK))) | snd_wnd)) & (__u32)0xff000000UL) >> 24))) : __fswab32(((tp->tcp_header_len << 26) | (__builtin_constant_p((__u32)(( __u32)(__be32)(TCP_FLAG_ACK))) ? ((__u32)( (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(TCP_FLAG_ACK)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(TCP_FLAG_ACK))) | snd_wnd))));


}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_fast_path_on(struct tcp_sock *tp)
{
__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_fast_path_check(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);

if ((({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_588(void) ; if (!((sizeof((&tp->out_of_order_queue)->rb_node) == sizeof(char) || sizeof((&tp->out_of_order_queue)->rb_node) == sizeof(short) || sizeof((&tp->out_of_order_queue)->rb_node) == sizeof(int) || sizeof((&tp->out_of_order_queue)->rb_node) == sizeof(long)) || sizeof((&tp->out_of_order_queue)->rb_node) == sizeof(long long))) __compiletime_assert_588(); } while (0); (*(const volatile typeof( _Generic(((&tp->out_of_order_queue)->rb_node), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&tp->out_of_order_queue)->rb_node))) *)&((&tp->out_of_order_queue)->rb_node)); }) == ((void *)0)) &&
tp->rcv_wnd &&
atomic_read(&sk->sk_backlog.rmem_alloc) < sk->sk_rcvbuf &&
!tp->urg_data)
tcp_fast_path_on(tp);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 tcp_rto_min(struct sock *sk)
{
const struct dst_entry *dst = __sk_dst_get(sk);
u32 rto_min = inet_csk(sk)->icsk_rto_min;

if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
return rto_min;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 tcp_rto_min_us(struct sock *sk)
{
return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
return dst_metric_locked(dst, RTAX_CC_ALGO);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 tcp_min_rtt(const struct tcp_sock *tp)
{
return minmax_get(&tp->rtt_min);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 tcp_receive_window(const struct tcp_sock *tp)
{
s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

if (win < 0)
win = 0;
return (u32) win;
}





u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);
# 778 "./include/net/tcp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 tcp_clock_ns(void)
{
return ktime_get_ns();
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 tcp_clock_us(void)
{
return div_u64(tcp_clock_ns(), 1000L);
}


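/*
 * tcp_time_stamp(): the TCP timestamp value in milliseconds,
 * derived from the socket's cached microsecond clock tp->tcp_mstamp.
 */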
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 tcp_time_stamp(const struct tcp_sock *tp)
{
return div_u64(tp->tcp_mstamp, 1000000L / 1000);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 tcp_ns_to_ts(u64 ns)
{
return div_u64(ns, 1000000000L / 1000);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 tcp_time_stamp_raw(void)
{
return tcp_ns_to_ts(tcp_clock_ns());
}

void tcp_mstamp_refresh(struct tcp_sock *tp);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
return __builtin_choose_expr(((!!(sizeof((typeof((s64)(t1 - t0)) *)1 == (typeof((s64)(0)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((s64)(t1 - t0)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((s64)(0)) * 0l)) : (int *)8))))), (((s64)(t1 - t0)) > ((s64)(0)) ? ((s64)(t1 - t0)) : ((s64)(0))), ({ typeof((s64)(t1 - t0)) __UNIQUE_ID___x589 = ((s64)(t1 - t0)); typeof((s64)(0)) __UNIQUE_ID___y590 = ((s64)(0)); ((__UNIQUE_ID___x589) > (__UNIQUE_ID___y590) ? (__UNIQUE_ID___x589) : (__UNIQUE_ID___y590)); }));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
return tcp_ns_to_ts(skb->skb_mstamp_ns);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
return div_u64(skb->skb_mstamp_ns, 1000L);
}
# 844 "./include/net/tcp.h"
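/*
 * tcp_skb_cb: TCP's per-packet state kept in skb->cb[]. The tx
 * struct (rate-sampling timestamps and delivery counts) shares a
 * union with the IPv4/IPv6 input control blocks, which are only
 * needed on the receive path.
 */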
struct tcp_skb_cb {
__u32 seq;
__u32 end_seq;
union {






__u32 tcp_tw_isn;
struct {
u16 tcp_gso_segs;
u16 tcp_gso_size;
};
};
__u8 tcp_flags;

__u8 sacked;
# 872 "./include/net/tcp.h"
__u8 ip_dsfield;
__u8 txstamp_ack:1,
eor:1,
has_rxtstamp:1,
unused:5;
__u32 ack_seq;
union {
struct {


__u32 is_app_limited:1,
delivered_ce:20,
unused:11;

__u32 delivered;

u64 first_tx_mstamp;

u64 delivered_mstamp;
} tx;
union {
struct inet_skb_parm h4;

struct inet6_skb_parm h6;

} header;
};
};



extern const struct inet_connection_sock_af_ops ipv4_specific;





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int tcp_v6_iif(const struct sk_buff *skb)
{
return ((struct tcp_skb_cb *)&((skb)->cb[0]))->header.h6.iif;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
{
bool l3_slave = ipv6_l3mdev_skb(((struct tcp_skb_cb *)&((skb)->cb[0]))->header.h6.flags);

return l3_slave ? skb->skb_iif : ((struct tcp_skb_cb *)&((skb)->cb[0]))->header.h6.iif;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int tcp_v6_sdif(const struct sk_buff *skb)
{




return 0;
}

extern const struct inet_connection_sock_af_ops ipv6_specific;

;
;
;




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int tcp_v4_sdif(struct sk_buff *skb)
{




return 0;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int tcp_skb_pcount(const struct sk_buff *skb)
{
return ((struct tcp_skb_cb *)&((skb)->cb[0]))->tcp_gso_segs;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
((struct tcp_skb_cb *)&((skb)->cb[0]))->tcp_gso_segs = segs;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
((struct tcp_skb_cb *)&((skb)->cb[0]))->tcp_gso_segs += segs;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int tcp_skb_mss(const struct sk_buff *skb)
{
return ((struct tcp_skb_cb *)&((skb)->cb[0]))->tcp_gso_size;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
return __builtin_expect(!!(!((struct tcp_skb_cb *)&((skb)->cb[0]))->eor), 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_skb_can_collapse(const struct sk_buff *to,
const struct sk_buff *from)
{
return __builtin_expect(!!(tcp_skb_can_collapse_to(to) && mptcp_skb_can_collapse(to, from) && skb_pure_zcopy_same(to, from)), 1);


}


enum tcp_ca_event {
CA_EVENT_TX_START,
CA_EVENT_CWND_RESTART,
CA_EVENT_COMPLETE_CWR,
CA_EVENT_LOSS,
CA_EVENT_ECN_NO_CE,
CA_EVENT_ECN_IS_CE,
};


enum tcp_ca_ack_event_flags {
CA_ACK_SLOWPATH = (1 << 0),
CA_ACK_WIN_UPDATE = (1 << 1),
CA_ACK_ECE = (1 << 2),
};
# 1018 "./include/net/tcp.h"
union tcp_cc_info;

struct ack_sample {
u32 pkts_acked;
s32 rtt_us;
u32 in_flight;
};
# 1034 "./include/net/tcp.h"
struct rate_sample {
u64 prior_mstamp;
u32 prior_delivered;
u32 prior_delivered_ce;
s32 delivered;
s32 delivered_ce;
long interval_us;
u32 snd_interval_us;
u32 rcv_interval_us;
long rtt_us;
int losses;
u32 acked_sacked;
u32 prior_in_flight;
u32 last_end_seq;
bool is_app_limited;
bool is_retrans;
bool is_ack_delayed;
};

struct tcp_congestion_ops {



u32 (*ssthresh)(struct sock *sk);


void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);


void (*set_state)(struct sock *sk, u8 new_state);


void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);


void (*in_ack_event)(struct sock *sk, u32 flags);


void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);


u32 (*min_tso_segs)(struct sock *sk);




void (*cong_control)(struct sock *sk, const struct rate_sample *rs);



u32 (*undo_cwnd)(struct sock *sk);

u32 (*sndbuf_expand)(struct sock *sk);



size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
union tcp_cc_info *info);

char name[16];
struct module *owner;
struct list_head list;
u32 key;
u32 flags;


void (*init)(struct sock *sk);

void (*release)(struct sock *sk);
} __attribute__((__aligned__((1 << 6))));

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(struct net *net, const char *name);
void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;

struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);

char *tcp_ca_get_name_by_key(u32 key, char *buffer);







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_ca_needs_ecn(const struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);

return icsk->icsk_ca_ops->flags & 0x2;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
struct inet_connection_sock *icsk = inet_csk(sk);

if (icsk->icsk_ca_ops->set_state)
icsk->icsk_ca_ops->set_state(sk, ca_state);
icsk->icsk_ca_state = ca_state;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
const struct inet_connection_sock *icsk = inet_csk(sk);

if (icsk->icsk_ca_ops->cwnd_event)
icsk->icsk_ca_ops->cwnd_event(sk, event);
}


void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
struct rate_sample *rs);
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
bool is_sack_reneg, struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
return t1 > t2 || (t1 == t2 && before(seq2, seq1));
}
# 1182 "./include/net/tcp.h"
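/* rx_opt.sack_ok is nonzero once SACK has been negotiated; "Reno" in
 * this file simply means "no SACK", not the Reno CA module.
 */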
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int tcp_is_sack(const struct tcp_sock *tp)
{
return __builtin_expect(!!(tp->rx_opt.sack_ok), 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_is_reno(const struct tcp_sock *tp)
{
return !tcp_is_sack(tp);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int tcp_left_out(const struct tcp_sock *tp)
{
return tp->sacked_out + tp->lost_out;
}
# 1211 "./include/net/tcp.h"
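/* Packets "in the network": sent but not yet cumulatively ACKed, minus
 * those SACKed or marked lost, plus retransmissions in flight:
 *
 * in_flight = packets_out - (sacked_out + lost_out) + retrans_out
 */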
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_in_slow_start(const struct tcp_sock *tp)
{
return tp->snd_cwnd < tp->snd_ssthresh;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
return tp->snd_ssthresh >= 0x7fffffff;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_in_cwnd_reduction(const struct sock *sk)
{
return ((1<<TCP_CA_CWR) | (1<<TCP_CA_Recovery)) &
(1 << inet_csk(sk)->icsk_ca_state);
}





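/* If cwnd > ssthresh, ssthresh may be raised to 3/4 * cwnd. The
 * __builtin_choose_expr() expression below appears to be the expansion
 * of the type-checked max() macro over:
 *
 * max(tp->snd_ssthresh, (tp->snd_cwnd >> 1) + (tp->snd_cwnd >> 2))
 */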
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u32 tcp_current_ssthresh(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);

if (tcp_in_cwnd_reduction(sk))
return tp->snd_ssthresh;
else
return __builtin_choose_expr(((!!(sizeof((typeof(tp->snd_ssthresh) *)1 == (typeof(((tp->snd_cwnd >> 1) + (tp->snd_cwnd >> 2))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(tp->snd_ssthresh) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(((tp->snd_cwnd >> 1) + (tp->snd_cwnd >> 2))) * 0l)) : (int *)8))))), ((tp->snd_ssthresh) > (((tp->snd_cwnd >> 1) + (tp->snd_cwnd >> 2))) ? (tp->snd_ssthresh) : (((tp->snd_cwnd >> 1) + (tp->snd_cwnd >> 2)))), ({ typeof(tp->snd_ssthresh) __UNIQUE_ID___x591 = (tp->snd_ssthresh); typeof(((tp->snd_cwnd >> 1) + (tp->snd_cwnd >> 2))) __UNIQUE_ID___y592 = (((tp->snd_cwnd >> 1) + (tp->snd_cwnd >> 2))); ((__UNIQUE_ID___x591) > (__UNIQUE_ID___y592) ? (__UNIQUE_ID___x591) : (__UNIQUE_ID___y592)); }));


}




void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
return 3;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 tcp_wnd_end(const struct tcp_sock *tp)
{
return tp->snd_una + tp->snd_wnd;
}
# 1283 "./include/net/tcp.h"
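/* cwnd-limited check: while in slow start, report cwnd-limited until
 * cwnd reaches twice what has been used (max_packets_out), so cwnd can
 * keep doubling; afterwards trust the is_cwnd_limited flag maintained
 * by the output path.
 */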
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_is_cwnd_limited(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);


if (tcp_in_slow_start(tp))
return tp->snd_cwnd < 2 * tp->max_packets_out;

return tp->is_cwnd_limited;
}







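/* The statement expression below is the expansion of
 * smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED; on
 * riscv the acquire ordering shows up as the "fence r,rw" asm.
 */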
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_needs_internal_pacing(const struct sock *sk)
{
return ({ typeof(*&sk->sk_pacing_status) ___p1 = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_593(void) ; if (!((sizeof(*&sk->sk_pacing_status) == sizeof(char) || sizeof(*&sk->sk_pacing_status) == sizeof(short) || sizeof(*&sk->sk_pacing_status) == sizeof(int) || sizeof(*&sk->sk_pacing_status) == sizeof(long)) || sizeof(*&sk->sk_pacing_status) == sizeof(long long))) __compiletime_assert_593(); } while (0); (*(const volatile typeof( _Generic((*&sk->sk_pacing_status), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*&sk->sk_pacing_status))) *)&(*&sk->sk_pacing_status)); }); do { __attribute__((__noreturn__)) extern void __compiletime_assert_594(void) ; if (!((sizeof(*&sk->sk_pacing_status) == sizeof(char) || sizeof(*&sk->sk_pacing_status) == sizeof(short) || sizeof(*&sk->sk_pacing_status) == sizeof(int) || sizeof(*&sk->sk_pacing_status) == sizeof(long)))) __compiletime_assert_594(); } while (0); __asm__ __volatile__ ("fence " "r" "," "rw" : : : "memory"); ___p1; }) == SK_PACING_NEEDED;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long tcp_pacing_delay(const struct sock *sk)
{
s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;

return delay > 0 ? nsecs_to_jiffies(delay) : 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_reset_xmit_timer(struct sock *sk,
const int what,
unsigned long when,
const unsigned long max_when)
{
inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
max_when);
}







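/* The numeric literals in the next two helpers come from CONFIG_HZ=100
 * in this build: ((unsigned)(100/5)) is TCP_RTO_MIN (HZ/5) and
 * ((unsigned)(120*100)) is TCP_RTO_MAX (120*HZ). tcp_probe0_base() is
 * max_t(unsigned long, icsk_rto, TCP_RTO_MIN) expanded, and
 * tcp_probe0_when() clamps the backoff shift to
 * ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1 before capping at max_when.
 */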
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long tcp_probe0_base(const struct sock *sk)
{
return __builtin_choose_expr(((!!(sizeof((typeof((unsigned long)(inet_csk(sk)->icsk_rto)) *)1 == (typeof((unsigned long)(((unsigned)(100/5)))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((unsigned long)(inet_csk(sk)->icsk_rto)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((unsigned long)(((unsigned)(100/5)))) * 0l)) : (int *)8))))), (((unsigned long)(inet_csk(sk)->icsk_rto)) > ((unsigned long)(((unsigned)(100/5)))) ? ((unsigned long)(inet_csk(sk)->icsk_rto)) : ((unsigned long)(((unsigned)(100/5))))), ({ typeof((unsigned long)(inet_csk(sk)->icsk_rto)) __UNIQUE_ID___x595 = ((unsigned long)(inet_csk(sk)->icsk_rto)); typeof((unsigned long)(((unsigned)(100/5)))) __UNIQUE_ID___y596 = ((unsigned long)(((unsigned)(100/5)))); ((__UNIQUE_ID___x595) > (__UNIQUE_ID___y596) ? (__UNIQUE_ID___x595) : (__UNIQUE_ID___y596)); }));
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long tcp_probe0_when(const struct sock *sk,
unsigned long max_when)
{
u8 backoff = __builtin_choose_expr(((!!(sizeof((typeof((u8)(( __builtin_constant_p(((unsigned)(120*100)) / ((unsigned)(100/5))) ? ((((unsigned)(120*100)) / ((unsigned)(100/5))) < 2 ? 0 : 63 - __builtin_clzll(((unsigned)(120*100)) / ((unsigned)(100/5)))) : (sizeof(((unsigned)(120*100)) / ((unsigned)(100/5))) <= 4) ? __ilog2_u32(((unsigned)(120*100)) / ((unsigned)(100/5))) : __ilog2_u64(((unsigned)(120*100)) / ((unsigned)(100/5))) ) + 1)) *)1 == (typeof((u8)(inet_csk(sk)->icsk_backoff)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((u8)(( __builtin_constant_p(((unsigned)(120*100)) / ((unsigned)(100/5))) ? ((((unsigned)(120*100)) / ((unsigned)(100/5))) < 2 ? 0 : 63 - __builtin_clzll(((unsigned)(120*100)) / ((unsigned)(100/5)))) : (sizeof(((unsigned)(120*100)) / ((unsigned)(100/5))) <= 4) ? __ilog2_u32(((unsigned)(120*100)) / ((unsigned)(100/5))) : __ilog2_u64(((unsigned)(120*100)) / ((unsigned)(100/5))) ) + 1)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((u8)(inet_csk(sk)->icsk_backoff)) * 0l)) : (int *)8))))), (((u8)(( __builtin_constant_p(((unsigned)(120*100)) / ((unsigned)(100/5))) ? ((((unsigned)(120*100)) / ((unsigned)(100/5))) < 2 ? 0 : 63 - __builtin_clzll(((unsigned)(120*100)) / ((unsigned)(100/5)))) : (sizeof(((unsigned)(120*100)) / ((unsigned)(100/5))) <= 4) ? __ilog2_u32(((unsigned)(120*100)) / ((unsigned)(100/5))) : __ilog2_u64(((unsigned)(120*100)) / ((unsigned)(100/5))) ) + 1)) < ((u8)(inet_csk(sk)->icsk_backoff)) ? ((u8)(( __builtin_constant_p(((unsigned)(120*100)) / ((unsigned)(100/5))) ? ((((unsigned)(120*100)) / ((unsigned)(100/5))) < 2 ? 0 : 63 - __builtin_clzll(((unsigned)(120*100)) / ((unsigned)(100/5)))) : (sizeof(((unsigned)(120*100)) / ((unsigned)(100/5))) <= 4) ? __ilog2_u32(((unsigned)(120*100)) / ((unsigned)(100/5))) : __ilog2_u64(((unsigned)(120*100)) / ((unsigned)(100/5))) ) + 1)) : ((u8)(inet_csk(sk)->icsk_backoff))), ({ typeof((u8)(( __builtin_constant_p(((unsigned)(120*100)) / ((unsigned)(100/5))) ? ((((unsigned)(120*100)) / ((unsigned)(100/5))) < 2 ? 0 : 63 - __builtin_clzll(((unsigned)(120*100)) / ((unsigned)(100/5)))) : (sizeof(((unsigned)(120*100)) / ((unsigned)(100/5))) <= 4) ? __ilog2_u32(((unsigned)(120*100)) / ((unsigned)(100/5))) : __ilog2_u64(((unsigned)(120*100)) / ((unsigned)(100/5))) ) + 1)) __UNIQUE_ID___x597 = ((u8)(( __builtin_constant_p(((unsigned)(120*100)) / ((unsigned)(100/5))) ? ((((unsigned)(120*100)) / ((unsigned)(100/5))) < 2 ? 0 : 63 - __builtin_clzll(((unsigned)(120*100)) / ((unsigned)(100/5)))) : (sizeof(((unsigned)(120*100)) / ((unsigned)(100/5))) <= 4) ? __ilog2_u32(((unsigned)(120*100)) / ((unsigned)(100/5))) : __ilog2_u64(((unsigned)(120*100)) / ((unsigned)(100/5))) ) + 1)); typeof((u8)(inet_csk(sk)->icsk_backoff)) __UNIQUE_ID___y598 = ((u8)(inet_csk(sk)->icsk_backoff)); ((__UNIQUE_ID___x597) < (__UNIQUE_ID___y598) ? (__UNIQUE_ID___x597) : (__UNIQUE_ID___y598)); }));

u64 when = (u64)tcp_probe0_base(sk) << backoff;

return (unsigned long)__builtin_choose_expr(((!!(sizeof((typeof((u64)(when)) *)1 == (typeof((u64)(max_when)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((u64)(when)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((u64)(max_when)) * 0l)) : (int *)8))))), (((u64)(when)) < ((u64)(max_when)) ? ((u64)(when)) : ((u64)(max_when))), ({ typeof((u64)(when)) __UNIQUE_ID___x599 = ((u64)(when)); typeof((u64)(max_when)) __UNIQUE_ID___y600 = ((u64)(max_when)); ((__UNIQUE_ID___x599) < (__UNIQUE_ID___y600) ? (__UNIQUE_ID___x599) : (__UNIQUE_ID___y600)); }));
}

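/* 3 below is ICSK_TIME_PROBE0: arm the zero-window probe timer when
 * nothing is in flight and no other timer is pending.
 */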
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_check_probe_timer(struct sock *sk)
{
if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
tcp_reset_xmit_timer(sk, 3,
tcp_probe0_base(sk), ((unsigned)(120*100)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
tp->snd_wl1 = seq;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
tp->snd_wl1 = seq;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __sum16 tcp_v4_check(int len, __be32 saddr,
__be32 daddr, __wsum base)
{
return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_checksum_complete(struct sk_buff *skb)
{
return !skb_csum_unnecessary(skb) &&
__skb_checksum_complete(skb);
}

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
enum skb_drop_reason *reason);


void __sk_defer_free_flush(struct sock *sk);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void sk_defer_free_flush(struct sock *sk)
{
if (llist_empty(&sk->defer_list))
return;
__sk_defer_free_flush(sk);
}




int tcp_filter(struct sock *sk, struct sk_buff *skb);
void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
int tcp_abort(struct sock *sk, int err);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
rx_opt->dsack = 0;
rx_opt->num_sacks = 0;
}

void tcp_cwnd_restart(struct sock *sk, s32 delta);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_slow_start_after_idle_check(struct sock *sk)
{
const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
struct tcp_sock *tp = tcp_sk(sk);
s32 delta;

if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out ||
ca_ops->cong_control)
return;
delta = ((u32)jiffies) - tp->lsndtime;
if (delta > inet_csk(sk)->icsk_rto)
tcp_cwnd_restart(sk, delta);
}


void tcp_select_initial_window(const struct sock *sk, int __space,
__u32 mss, __u32 *rcv_wnd,
__u32 *window_clamp, int wscale_ok,
__u8 *rcv_wscale, __u32 init_rcv_wnd);

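/* sysctl_tcp_adv_win_scale sets how much of the receive buffer counts
 * as window: space >> -scale when scale <= 0, otherwise
 * space - (space >> scale).
 */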
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int tcp_win_from_space(const struct sock *sk, int space)
{
int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;

return tcp_adv_win_scale <= 0 ?
(space>>(-tcp_adv_win_scale)) :
space - (space>>tcp_adv_win_scale);
}


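/* Free receive space available to user data: rcvbuf minus the backlog
 * length minus memory already charged for queued data. The large
 * statement expressions are READ_ONCE() expansions.
 */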
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int tcp_space(const struct sock *sk)
{
return tcp_win_from_space(sk, ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_601(void) ; if (!((sizeof(sk->sk_rcvbuf) == sizeof(char) || sizeof(sk->sk_rcvbuf) == sizeof(short) || sizeof(sk->sk_rcvbuf) == sizeof(int) || sizeof(sk->sk_rcvbuf) == sizeof(long)) || sizeof(sk->sk_rcvbuf) == sizeof(long long))) __compiletime_assert_601(); } while (0); (*(const volatile typeof( _Generic((sk->sk_rcvbuf), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvbuf))) *)&(sk->sk_rcvbuf)); }) -
({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_602(void) ; if (!((sizeof(sk->sk_backlog.len) == sizeof(char) || sizeof(sk->sk_backlog.len) == sizeof(short) || sizeof(sk->sk_backlog.len) == sizeof(int) || sizeof(sk->sk_backlog.len) == sizeof(long)) || sizeof(sk->sk_backlog.len) == sizeof(long long))) __compiletime_assert_602(); } while (0); (*(const volatile typeof( _Generic((sk->sk_backlog.len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_backlog.len))) *)&(sk->sk_backlog.len)); }) -
atomic_read(&sk->sk_backlog.rmem_alloc));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int tcp_full_space(const struct sock *sk)
{
return tcp_win_from_space(sk, ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_603(void) ; if (!((sizeof(sk->sk_rcvbuf) == sizeof(char) || sizeof(sk->sk_rcvbuf) == sizeof(short) || sizeof(sk->sk_rcvbuf) == sizeof(int) || sizeof(sk->sk_rcvbuf) == sizeof(long)) || sizeof(sk->sk_rcvbuf) == sizeof(long long))) __compiletime_assert_603(); } while (0); (*(const volatile typeof( _Generic((sk->sk_rcvbuf), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvbuf))) *)&(sk->sk_rcvbuf)); }));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_adjust_rcv_ssthresh(struct sock *sk)
{
int unused_mem = sk_unused_reserved_mem(sk);
struct tcp_sock *tp = tcp_sk(sk);

tp->rcv_ssthresh = __builtin_choose_expr(((!!(sizeof((typeof(tp->rcv_ssthresh) *)1 == (typeof(4U * tp->advmss) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(tp->rcv_ssthresh) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(4U * tp->advmss) * 0l)) : (int *)8))))), ((tp->rcv_ssthresh) < (4U * tp->advmss) ? (tp->rcv_ssthresh) : (4U * tp->advmss)), ({ typeof(tp->rcv_ssthresh) __UNIQUE_ID___x604 = (tp->rcv_ssthresh); typeof(4U * tp->advmss) __UNIQUE_ID___y605 = (4U * tp->advmss); ((__UNIQUE_ID___x604) < (__UNIQUE_ID___y605) ? (__UNIQUE_ID___x604) : (__UNIQUE_ID___y605)); }));
if (unused_mem)
tp->rcv_ssthresh = __builtin_choose_expr(((!!(sizeof((typeof((u32)(tp->rcv_ssthresh)) *)1 == (typeof((u32)(tcp_win_from_space(sk, unused_mem))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((u32)(tp->rcv_ssthresh)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((u32)(tcp_win_from_space(sk, unused_mem))) * 0l)) : (int *)8))))), (((u32)(tp->rcv_ssthresh)) > ((u32)(tcp_win_from_space(sk, unused_mem))) ? ((u32)(tp->rcv_ssthresh)) : ((u32)(tcp_win_from_space(sk, unused_mem)))), ({ typeof((u32)(tp->rcv_ssthresh)) __UNIQUE_ID___x606 = ((u32)(tp->rcv_ssthresh)); typeof((u32)(tcp_win_from_space(sk, unused_mem))) __UNIQUE_ID___y607 = ((u32)(tcp_win_from_space(sk, unused_mem))); ((__UNIQUE_ID___x606) > (__UNIQUE_ID___y607) ? (__UNIQUE_ID___x606) : (__UNIQUE_ID___y607)); }));

}

void tcp_cleanup_rbuf(struct sock *sk, int copied);






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_rmem_pressure(const struct sock *sk)
{
int rcvbuf, threshold;

if (tcp_under_memory_pressure(sk))
return true;

rcvbuf = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_608(void) ; if (!((sizeof(sk->sk_rcvbuf) == sizeof(char) || sizeof(sk->sk_rcvbuf) == sizeof(short) || sizeof(sk->sk_rcvbuf) == sizeof(int) || sizeof(sk->sk_rcvbuf) == sizeof(long)) || sizeof(sk->sk_rcvbuf) == sizeof(long long))) __compiletime_assert_608(); } while (0); (*(const volatile typeof( _Generic((sk->sk_rcvbuf), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvbuf))) *)&(sk->sk_rcvbuf)); });
threshold = rcvbuf - (rcvbuf >> 3);

return atomic_read(&sk->sk_backlog.rmem_alloc) > threshold;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_epollin_ready(const struct sock *sk, int target)
{
const struct tcp_sock *tp = tcp_sk(sk);
int avail = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_609(void) ; if (!((sizeof(tp->rcv_nxt) == sizeof(char) || sizeof(tp->rcv_nxt) == sizeof(short) || sizeof(tp->rcv_nxt) == sizeof(int) || sizeof(tp->rcv_nxt) == sizeof(long)) || sizeof(tp->rcv_nxt) == sizeof(long long))) __compiletime_assert_609(); } while (0); (*(const volatile typeof( _Generic((tp->rcv_nxt), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (tp->rcv_nxt))) *)&(tp->rcv_nxt)); }) - ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_610(void) ; if (!((sizeof(tp->copied_seq) == sizeof(char) || sizeof(tp->copied_seq) == sizeof(short) || sizeof(tp->copied_seq) == sizeof(int) || sizeof(tp->copied_seq) == sizeof(long)) || sizeof(tp->copied_seq) == sizeof(long long))) __compiletime_assert_610(); } while (0); (*(const volatile typeof( _Generic((tp->copied_seq), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (tp->copied_seq))) *)&(tp->copied_seq)); });

if (avail <= 0)
return false;

return (avail >= target) || tcp_rmem_pressure(sk) ||
(tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
}

extern void tcp_openreq_init_rwin(struct request_sock *req,
const struct sock *sk_listener,
const struct dst_entry *dst);

void tcp_enter_memory_pressure(struct sock *sk);
void tcp_leave_memory_pressure(struct sock *sk);

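/* The keepalive knobs fall back from per-socket values to the per-netns
 * sysctls via the GNU "x ? : y" extension.
 */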
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int keepalive_intvl_when(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);

return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int keepalive_time_when(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);

return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int keepalive_probes(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);

return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
const struct inet_connection_sock *icsk = &tp->inet_conn;

return __builtin_choose_expr(((!!(sizeof((typeof((u32)(((u32)jiffies) - icsk->icsk_ack.lrcvtime)) *)1 == (typeof((u32)(((u32)jiffies) - tp->rcv_tstamp)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((u32)(((u32)jiffies) - icsk->icsk_ack.lrcvtime)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((u32)(((u32)jiffies) - tp->rcv_tstamp)) * 0l)) : (int *)8))))), (((u32)(((u32)jiffies) - icsk->icsk_ack.lrcvtime)) < ((u32)(((u32)jiffies) - tp->rcv_tstamp)) ? ((u32)(((u32)jiffies) - icsk->icsk_ack.lrcvtime)) : ((u32)(((u32)jiffies) - tp->rcv_tstamp))), ({ typeof((u32)(((u32)jiffies) - icsk->icsk_ack.lrcvtime)) __UNIQUE_ID___x611 = ((u32)(((u32)jiffies) - icsk->icsk_ack.lrcvtime)); typeof((u32)(((u32)jiffies) - tp->rcv_tstamp)) __UNIQUE_ID___y612 = ((u32)(((u32)jiffies) - tp->rcv_tstamp)); ((__UNIQUE_ID___x611) < (__UNIQUE_ID___y612) ? (__UNIQUE_ID___x611) : (__UNIQUE_ID___y612)); }));

}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int tcp_fin_time(const struct sock *sk)
{
int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
const int rto = inet_csk(sk)->icsk_rto;

if (fin_timeout < (rto << 2) - (rto >> 1))
fin_timeout = (rto << 2) - (rto >> 1);

return fin_timeout;
}

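/* PAWS (RFC 7323): a segment passes if its timestamp is within
 * paws_win of ts_recent, if ts_recent is older than 24 days
 * (the 60 * 60 * 24 * 24 literal), or if no timestamp was ever stored.
 */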
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_paws_check(const struct tcp_options_received *rx_opt,
int paws_win)
{
if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
return true;
if (__builtin_expect(!!(!((s32)((u32)(ktime_get_seconds()) - (u32)(rx_opt->ts_recent_stamp + (60 * 60 * 24 * 24))) < 0)), 0))

return true;





if (!rx_opt->ts_recent)
return true;
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
int rst)
{
if (tcp_paws_check(rx_opt, 0))
return false;
# 1575 "./include/net/tcp.h"
if (rst && !((s32)((u32)(ktime_get_seconds()) - (u32)(rx_opt->ts_recent_stamp + 60)) < 0))

return false;
return true;
}

bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
int mib_idx, u32 *last_oow_ack_time);

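/* The per-cpu switch/case blocks below are TCP_ADD_STATS() expansions
 * seeding the SNMP MIB; unexpanded they read roughly:
 *
 * TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
 * TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN * 1000 / HZ);
 * TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX * 1000 / HZ);
 * TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
 */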
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_mib_init(struct net *net)
{

do { do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM])) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM])) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM]))); (typeof((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM]))); (typeof((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM]))); (typeof((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM]))) 
*)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM]))); (typeof((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOALGORITHM])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
do { do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN])) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN])) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN]))); (typeof((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += ((unsigned)(100/5))*1000/100; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN]))); (typeof((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += ((unsigned)(100/5))*1000/100; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN]))); (typeof((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += ((unsigned)(100/5))*1000/100; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN]))); 
(typeof((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMIN])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += ((unsigned)(100/5))*1000/100; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
do { do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX])) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX])) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX]))); (typeof((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += ((unsigned)(120*100))*1000/100; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX]))); (typeof((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += ((unsigned)(120*100))*1000/100; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX]))); (typeof((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += ((unsigned)(120*100))*1000/100; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX]))); 
(typeof((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_RTOMAX])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += ((unsigned)(120*100))*1000/100; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
do { do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN])) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN])) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN]))); (typeof((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN]))); (typeof((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN]))); (typeof((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN]))) *)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN]))); (typeof((typeof(*(&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN]))) 
*)(&((net)->mib.tcp_statistics->mibs[TCP_MIB_MAXCONN])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
tp->lost_skb_hint = ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
tcp_clear_retrans_hints_partial(tp);
tp->retransmit_skb_hint = ((void *)0);
}

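/* TCP MD5 signature option (RFC 2385) key and pseudo-header layouts.
 * These structures are defined unconditionally; the lookup helpers
 * further down indicate CONFIG_TCP_MD5SIG is off in this config.
 */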
union tcp_md5_addr {
struct in_addr a4;

struct in6_addr a6;

};


struct tcp_md5sig_key {
struct hlist_node node;
u8 keylen;
u8 family;
u8 prefixlen;
u8 flags;
union tcp_md5_addr addr;
int l3index;
u8 key[80];
struct callback_head rcu;
};


struct tcp_md5sig_info {
struct hlist_head head;
struct callback_head rcu;
};


struct tcp4_pseudohdr {
__be32 saddr;
__be32 daddr;
__u8 pad;
__u8 protocol;
__be16 len;
};

struct tcp6_pseudohdr {
struct in6_addr saddr;
struct in6_addr daddr;
__be32 len;
__be32 protocol;
};

union tcp_md5sum_block {
struct tcp4_pseudohdr ip4;

struct tcp6_pseudohdr ip6;

};


struct tcp_md5sig_pool {
struct ahash_request *md5_req;
void *scratch;
};


int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
int family, u8 prefixlen, int l3index, u8 flags,
const u8 *newkey, u8 newkeylen, gfp_t gfp);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
int family, u8 prefixlen, int l3index, u8 flags);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
const struct sock *addr_sk);
# 1694 "./include/net/tcp.h"
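/* !CONFIG_TCP_MD5SIG stubs: no key is ever found and inbound segments
 * are never dropped on MD5 grounds.
 */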
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
const union tcp_md5_addr *addr, int family)
{
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) enum skb_drop_reason
tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
const void *saddr, const void *daddr,
int family, int dif, int sdif)
{
return SKB_NOT_DROPPED_YET;
}



bool tcp_alloc_md5sig_pool(void);

struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_put_md5sig_pool(void)
{
local_bh_enable();
}

int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
unsigned int header_len);
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
const struct tcp_md5sig_key *key);


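/* TCP Fast Open (RFC 7413): cookie generation/validation plus the
 * per-peer metrics cache remembering cookies and MSS.
 */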
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
struct tcp_fastopen_cookie *cookie);
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
struct tcp_fastopen_cookie *cookie, bool syn_lost,
u16 try_exp);
struct tcp_fastopen_request {

struct tcp_fastopen_cookie cookie;
struct msghdr *data;
size_t size;
int copied;
struct ubuf_info *uarg;
};
void tcp_free_fastopen_req(struct tcp_sock *tp);
void tcp_fastopen_destroy_cipher(struct sock *sk);
void tcp_fastopen_ctx_destroy(struct net *net);
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
void *primary_key, void *backup_key);
int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
u64 *key);
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct tcp_fastopen_cookie *foc,
const struct dst_entry *dst);
void tcp_fastopen_init_key_once(struct net *net);
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
struct tcp_fastopen_cookie *cookie);
bool tcp_fastopen_defer_connect(struct sock *sk, int *err);






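/* Two SipHash keys (primary plus backup) let servers rotate Fast Open
 * cookie keys without immediately invalidating existing clients.
 */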
struct tcp_fastopen_context {
siphash_key_t key[2];
int num;
struct callback_head rcu;
};

void tcp_fastopen_active_disable(struct sock *sk);
bool tcp_fastopen_active_should_disable(struct sock *sk);
void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
{
struct tcp_fastopen_context *ctx;

ctx = ({ typeof(*(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx)) *__UNIQUE_ID_rcu613 = (typeof(*(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_614(void) ; if (!((sizeof((inet_csk(sk)->icsk_accept_queue.fastopenq.ctx)) == sizeof(char) || sizeof((inet_csk(sk)->icsk_accept_queue.fastopenq.ctx)) == sizeof(short) || sizeof((inet_csk(sk)->icsk_accept_queue.fastopenq.ctx)) == sizeof(int) || sizeof((inet_csk(sk)->icsk_accept_queue.fastopenq.ctx)) == sizeof(long)) || sizeof((inet_csk(sk)->icsk_accept_queue.fastopenq.ctx)) == sizeof(long long))) __compiletime_assert_614(); } while (0); (*(const volatile typeof( _Generic(((inet_csk(sk)->icsk_accept_queue.fastopenq.ctx)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((inet_csk(sk)->icsk_accept_queue.fastopenq.ctx)))) *)&((inet_csk(sk)->icsk_accept_queue.fastopenq.ctx))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx)) *)(__UNIQUE_ID_rcu613)); });
if (!ctx)
ctx = ({ typeof(*(sock_net(sk)->ipv4.tcp_fastopen_ctx)) *__UNIQUE_ID_rcu615 = (typeof(*(sock_net(sk)->ipv4.tcp_fastopen_ctx)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_616(void) ; if (!((sizeof((sock_net(sk)->ipv4.tcp_fastopen_ctx)) == sizeof(char) || sizeof((sock_net(sk)->ipv4.tcp_fastopen_ctx)) == sizeof(short) || sizeof((sock_net(sk)->ipv4.tcp_fastopen_ctx)) == sizeof(int) || sizeof((sock_net(sk)->ipv4.tcp_fastopen_ctx)) == sizeof(long)) || sizeof((sock_net(sk)->ipv4.tcp_fastopen_ctx)) == sizeof(long long))) __compiletime_assert_616(); } while (0); (*(const volatile typeof( _Generic(((sock_net(sk)->ipv4.tcp_fastopen_ctx)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((sock_net(sk)->ipv4.tcp_fastopen_ctx)))) *)&((sock_net(sk)->ipv4.tcp_fastopen_ctx))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(sock_net(sk)->ipv4.tcp_fastopen_ctx)) *)(__UNIQUE_ID_rcu615)); });
return ctx;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
const struct tcp_fastopen_cookie *orig)
{
if (orig->len == 8 &&
orig->len == foc->len &&
!memcmp(orig->val, foc->val, foc->len))
return true;
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
{
return ctx->num;
}




enum tcp_chrono {
TCP_CHRONO_UNSPEC,
TCP_CHRONO_BUSY,
TCP_CHRONO_RWND_LIMITED,
TCP_CHRONO_SNDBUF_LIMITED,
__TCP_CHRONO_MAX,
};

void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
{
skb->destructor = ((void *)0);
skb->_skb_refdst = 0UL;
}
# 1831 "./include/net/tcp.h"
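/* Unsent data lives on sk->sk_write_queue (a plain skb list); once
 * transmitted, skbs move to the sk->tcp_rtx_queue rb-tree keyed by
 * sequence number. The container_of() statement expressions below are
 * rb_entry_safe() expansions over sk_buff.rbnode.
 */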
void tcp_write_queue_purge(struct sock *sk);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
{
return ({ typeof(rb_first(&sk->tcp_rtx_queue)) ____ptr = (rb_first(&sk->tcp_rtx_queue)); ____ptr ? ({ void *__mptr = (void *)(____ptr); _Static_assert(__builtin_types_compatible_p(typeof(*(____ptr)), typeof(((struct sk_buff *)0)->rbnode)) || __builtin_types_compatible_p(typeof(*(____ptr)), typeof(void)), "pointer type mismatch in container_of()"); ((struct sk_buff *)(__mptr - __builtin_offsetof(struct sk_buff, rbnode))); }) : ((void *)0); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
{
return ({ typeof(rb_last(&sk->tcp_rtx_queue)) ____ptr = (rb_last(&sk->tcp_rtx_queue)); ____ptr ? ({ void *__mptr = (void *)(____ptr); _Static_assert(__builtin_types_compatible_p(typeof(*(____ptr)), typeof(((struct sk_buff *)0)->rbnode)) || __builtin_types_compatible_p(typeof(*(____ptr)), typeof(void)), "pointer type mismatch in container_of()"); ((struct sk_buff *)(__mptr - __builtin_offsetof(struct sk_buff, rbnode))); }) : ((void *)0); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
return skb_peek_tail(&sk->sk_write_queue);
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *tcp_send_head(const struct sock *sk)
{
return skb_peek(&sk->sk_write_queue);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_skb_is_last(const struct sock *sk,
const struct sk_buff *skb)
{
return skb_queue_is_last(&sk->sk_write_queue, skb);
}
# 1869 "./include/net/tcp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_write_queue_empty(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);

return tp->write_seq == tp->snd_nxt;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_rtx_queue_empty(const struct sock *sk)
{
return (({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_617(void) ; if (!((sizeof((&sk->tcp_rtx_queue)->rb_node) == sizeof(char) || sizeof((&sk->tcp_rtx_queue)->rb_node) == sizeof(short) || sizeof((&sk->tcp_rtx_queue)->rb_node) == sizeof(int) || sizeof((&sk->tcp_rtx_queue)->rb_node) == sizeof(long)) || sizeof((&sk->tcp_rtx_queue)->rb_node) == sizeof(long long))) __compiletime_assert_617(); } while (0); (*(const volatile typeof( _Generic(((&sk->tcp_rtx_queue)->rb_node), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&sk->tcp_rtx_queue)->rb_node))) *)&((&sk->tcp_rtx_queue)->rb_node)); }) == ((void *)0));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
{
return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
__skb_queue_tail(&sk->sk_write_queue, skb);


if (sk->sk_write_queue.next == skb)
tcp_chrono_start(sk, TCP_CHRONO_BUSY);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_insert_write_queue_before(struct sk_buff *new,
struct sk_buff *skb,
struct sock *sk)
{
__skb_queue_before(&sk->sk_write_queue, skb, new);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
tcp_skb_tsorted_anchor_cleanup(skb);
__skb_unlink(skb, &sk->sk_write_queue);
}

void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
{
tcp_skb_tsorted_anchor_cleanup(skb);
rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
{
list_del(&skb->tcp_tsorted_anchor);
tcp_rtx_queue_unlink(skb, sk);
tcp_wmem_free_skb(sk, skb);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_push_pending_frames(struct sock *sk)
{
if (tcp_send_head(sk)) {
struct tcp_sock *tp = tcp_sk(sk);

__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
}
}





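/* Start sequence of the highest SACKed skb; falls back to snd_una when
 * nothing is SACKed and to snd_nxt when the cached hint is gone.
 */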
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
if (!tp->sacked_out)
return tp->snd_una;

if (tp->highest_sack == ((void *)0))
return tp->snd_nxt;

return ((struct tcp_skb_cb *)&((tp->highest_sack)->cb[0]))->seq;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
tcp_sk(sk)->highest_sack = ({ typeof(rb_next(&(skb)->rbnode)) ____ptr = (rb_next(&(skb)->rbnode)); ____ptr ? ({ void *__mptr = (void *)(____ptr); _Static_assert(__builtin_types_compatible_p(typeof(*(____ptr)), typeof(((struct sk_buff *)0)->rbnode)) || __builtin_types_compatible_p(typeof(*(____ptr)), typeof(void)), "pointer type mismatch in container_of()"); ((struct sk_buff *)(__mptr - __builtin_offsetof(struct sk_buff, rbnode))); }) : ((void *)0); });
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *tcp_highest_sack(struct sock *sk)
{
return tcp_sk(sk)->highest_sack;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_highest_sack_reset(struct sock *sk)
{
tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_highest_sack_replace(struct sock *sk,
struct sk_buff *old,
struct sk_buff *new)
{
if (old == tcp_highest_sack(sk))
tcp_sk(sk)->highest_sack = new;
}


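/* True if the socket, or the request/timewait mini-socket standing in
 * for it, has IP_TRANSPARENT set.
 */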
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool inet_sk_transparent(const struct sock *sk)
{
switch (sk->__sk_common.skc_state) {
case TCP_TIME_WAIT:
return inet_twsk(sk)->tw_transparent;
case TCP_NEW_SYN_RECV:
return inet_rsk(inet_reqsk(sk))->no_srccheck;
}
return inet_sk(sk)->transparent;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_stream_is_thin(struct tcp_sock *tp)
{
return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}


enum tcp_seq_states {
TCP_SEQ_STATE_LISTENING,
TCP_SEQ_STATE_ESTABLISHED,
};

void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void tcp_seq_stop(struct seq_file *seq, void *v);

struct tcp_seq_afinfo {
sa_family_t family;
};

struct tcp_iter_state {
struct seq_net_private p;
enum tcp_seq_states state;
struct sock *syn_wait_sk;
int bucket, offset, sbucket, num;
loff_t last_pos;
};

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

void tcp_v4_destroy_sock(struct sock *sk);

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
netdev_features_t features);
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
;
;
;
;
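/* Annotation: the four bare semicolons above are presumably left behind by
 * INDIRECT_CALLABLE_DECLARE() (likely the tcp{4,6}_gro_{receive,complete}
 * prototypes) expanding to nothing on this config. */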
int tcp_gro_complete(struct sk_buff *skb);

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);
return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
}

bool tcp_stream_memory_free(const struct sock *sk, int wake);


int tcp4_proc_init(void);
void tcp4_proc_exit(void);


int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
int tcp_conn_request(struct request_sock_ops *rsk_ops,
const struct tcp_request_sock_ops *af_ops,
struct sock *sk, struct sk_buff *skb);


struct tcp_sock_af_ops {
# 2062 "./include/net/tcp.h"
};

struct tcp_request_sock_ops {
u16 mss_clamp;
# 2078 "./include/net/tcp.h"
struct dst_entry *(*route_req)(const struct sock *sk,
struct sk_buff *skb,
struct flowi *fl,
struct request_sock *req);
u32 (*init_seq)(const struct sk_buff *skb);
u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl, struct request_sock *req,
struct tcp_fastopen_cookie *foc,
enum tcp_synack_type synack_type,
struct sk_buff *syn_skb);
};

extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;

extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
# 2106 "./include/net/tcp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
const struct sock *sk, struct sk_buff *skb,
__u16 *mss)
{
return 0;
}


int tcpv4_offload_init(void);

void tcp_v4_init(void);
void tcp_init(void);


void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
u32 reo_wnd);
extern bool tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) s64 tcp_rto_delta_us(const struct sock *sk)
{
const struct sk_buff *skb = tcp_rtx_queue_head(sk);
u32 rto = inet_csk(sk)->icsk_rto;
u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);

return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct ip_options_rcu *tcp_v4_save_options(struct net *net,
struct sk_buff *skb)
{
const struct ip_options *opt = &((struct tcp_skb_cb *)&((skb)->cb[0]))->header.h4.opt;
struct ip_options_rcu *dopt = ((void *)0);

if (opt->optlen) {
int opt_size = sizeof(*dopt) + opt->optlen;

dopt = kmalloc(opt_size, ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)));
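/* Annotation: the gfp mask (0x20u|0x200u|0x800u) matches GFP_ATOMIC
 * (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) on this config. */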
if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
kfree(dopt);
dopt = ((void *)0);
}
}
return dopt;
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
{
return skb->truesize == 2;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void skb_set_tcp_pure_ack(struct sk_buff *skb)
{
skb->truesize = 2;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int tcp_inq(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
int answ;

if ((1 << sk->__sk_common.skc_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
answ = 0;
} else if (sock_flag(sk, SOCK_URGINLINE) ||
!tp->urg_data ||
before(tp->urg_seq, tp->copied_seq) ||
!before(tp->urg_seq, tp->rcv_nxt)) {

answ = tp->rcv_nxt - tp->copied_seq;


if (answ && sock_flag(sk, SOCK_DONE))
answ--;
} else {
answ = tp->urg_seq - tp->copied_seq;
}

return answ;
}

int tcp_peek_len(struct socket *sock);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
{
u16 segs_in;

segs_in = __builtin_choose_expr(((!!(sizeof((typeof((u16)(1)) *)1 == (typeof((u16)(((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((u16)(1)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((u16)(((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs)) * 0l)) : (int *)8))))), (((u16)(1)) > ((u16)(((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs)) ? ((u16)(1)) : ((u16)(((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs))), ({ typeof((u16)(1)) __UNIQUE_ID___x618 = ((u16)(1)); typeof((u16)(((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs)) __UNIQUE_ID___y619 = ((u16)(((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs)); ((__UNIQUE_ID___x618) > (__UNIQUE_ID___y619) ? (__UNIQUE_ID___x618) : (__UNIQUE_ID___y619)); }));
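/* Annotation: the __builtin_choose_expr() expression above appears to be
 * max_t(u16, 1, skb_shinfo(skb)->gso_segs) after macro expansion. */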




do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_620(void) ; if (!((sizeof(tp->segs_in) == sizeof(char) || sizeof(tp->segs_in) == sizeof(short) || sizeof(tp->segs_in) == sizeof(int) || sizeof(tp->segs_in) == sizeof(long)) || sizeof(tp->segs_in) == sizeof(long long))) __compiletime_assert_620(); } while (0); do { *(volatile typeof(tp->segs_in) *)&(tp->segs_in) = (tp->segs_in + segs_in); } while (0); } while (0);
if (skb->len > tcp_hdrlen(skb))
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_621(void) ; if (!((sizeof(tp->data_segs_in) == sizeof(char) || sizeof(tp->data_segs_in) == sizeof(short) || sizeof(tp->data_segs_in) == sizeof(int) || sizeof(tp->data_segs_in) == sizeof(long)) || sizeof(tp->data_segs_in) == sizeof(long long))) __compiletime_assert_621(); } while (0); do { *(volatile typeof(tp->data_segs_in) *)&(tp->data_segs_in) = (tp->data_segs_in + segs_in); } while (0); } while (0);
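/* Annotation: the two do/while blocks above look like WRITE_ONCE()
 * expansions: a size check via __compiletime_assert_*() followed by a
 * volatile store to tp->segs_in and tp->data_segs_in respectively. */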
}
# 2223 "./include/net/tcp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_listendrop(const struct sock *sk)
{
atomic_inc(&((struct sock *)sk)->sk_drops);
({ __this_cpu_preempt_check("add"); do { do { const void *__vpp_verify = (typeof((&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS])) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS])) { case 1: do { *({ do { const void *__vpp_verify = (typeof((&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS]))) *)(&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS]))); (typeof((typeof(*(&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS]))) *)(&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0);break; case 2: do { *({ do { const void *__vpp_verify = (typeof((&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS]))) *)(&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS]))); (typeof((typeof(*(&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS]))) *)(&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0);break; case 4: do { *({ do { const void *__vpp_verify = (typeof((&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS]))) *)(&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS]))); (typeof((typeof(*(&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS]))) *)(&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0);break; case 8: do { *({ do { const void *__vpp_verify = (typeof((&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS]))) *)(&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS]))); (typeof((typeof(*(&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS]))) *)(&((sock_net(sk))->mib.net_statistics->mibs[LINUX_MIB_LISTENDROPS])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0);break; default: __bad_size_call_parameter();break; } } while (0); });
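/* Annotation: likely __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS):
 * a preemption check plus a size-dispatched per-cpu increment of the SNMP
 * mib counter. */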
}

enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
# 2239 "./include/net/tcp.h"
struct tcp_ulp_ops {
struct list_head list;


int (*init)(struct sock *sk);

void (*update)(struct sock *sk, struct proto *p,
void (*write_space)(struct sock *sk));

void (*release)(struct sock *sk);

int (*get_info)(const struct sock *sk, struct sk_buff *skb);
size_t (*get_info_size)(const struct sock *sk);

void (*clone)(const struct request_sock *req, struct sock *newsk,
const gfp_t priority);

char name[16];
struct module *owner;
};
int tcp_register_ulp(struct tcp_ulp_ops *type);
void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
void tcp_update_ulp(struct sock *sk, struct proto *p,
void (*write_space)(struct sock *sk));






struct sk_msg;
struct sk_psock;


struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);


int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes,
int flags);
# 2292 "./include/net/tcp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
struct sk_buff *skb,
unsigned int end_offset)
{
skops->skb = skb;
skops->skb_data_end = skb->data + end_offset;
}
# 2313 "./include/net/tcp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
struct bpf_sock_ops_kern sock_ops;
int ret;

memset(&sock_ops, 0, __builtin_offsetof(struct bpf_sock_ops_kern, temp));
if (sk_fullsock(sk)) {
sock_ops.is_fullsock = 1;
sock_owned_by_me(sk);
}

sock_ops.sk = sk;
sock_ops.op = op;
if (nargs > 0)
memcpy(sock_ops.args, args, nargs * sizeof(*args));

ret = ({ int __ret = 0; if (__builtin_expect(!!(({ if (!__builtin_types_compatible_p(typeof(*&(&cgroup_bpf_enabled_key[CGROUP_SOCK_OPS])->key), struct static_key) && !__builtin_types_compatible_p(typeof(*&(&cgroup_bpf_enabled_key[CGROUP_SOCK_OPS])->key), struct static_key_true) && !__builtin_types_compatible_p(typeof(*&(&cgroup_bpf_enabled_key[CGROUP_SOCK_OPS])->key), struct static_key_false)) ____wrong_branch_error(); static_key_count((struct static_key *)&(&cgroup_bpf_enabled_key[CGROUP_SOCK_OPS])->key) > 0; })), 0) && (&sock_ops)->sk) { typeof(sk) __sk = sk_to_full_sk((&sock_ops)->sk); if (__sk && sk_fullsock(__sk)) __ret = __cgroup_bpf_run_filter_sock_ops(__sk, &sock_ops, CGROUP_SOCK_OPS); } __ret; });
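/* Annotation: this statement appears to be
 * BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops); the static_key_count() test
 * guards the cgroup-BPF fast path before calling
 * __cgroup_bpf_run_filter_sock_ops(). */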
if (ret == 0)
ret = sock_ops.reply;
else
ret = -1;
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
u32 args[2] = {arg1, arg2};

return tcp_call_bpf(sk, op, 2, args);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
u32 arg3)
{
u32 args[3] = {arg1, arg2, arg3};

return tcp_call_bpf(sk, op, 3, args);
}
# 2371 "./include/net/tcp.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 tcp_timeout_init(struct sock *sk)
{
int timeout;

timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, ((void *)0));

if (timeout <= 0)
timeout = ((unsigned)(1*100));
return __builtin_choose_expr(((!!(sizeof((typeof((int)(timeout)) *)1 == (typeof((int)(((unsigned)(120*100)))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(timeout)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(((unsigned)(120*100)))) * 0l)) : (int *)8))))), (((int)(timeout)) < ((int)(((unsigned)(120*100)))) ? ((int)(timeout)) : ((int)(((unsigned)(120*100))))), ({ typeof((int)(timeout)) __UNIQUE_ID___x622 = ((int)(timeout)); typeof((int)(((unsigned)(120*100)))) __UNIQUE_ID___y623 = ((int)(((unsigned)(120*100)))); ((__UNIQUE_ID___x622) < (__UNIQUE_ID___y623) ? (__UNIQUE_ID___x622) : (__UNIQUE_ID___y623)); }));
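/* Annotation: presumably min_t(int, timeout, TCP_RTO_MAX), with HZ = 100 on
 * this config, so TCP_RTO_MAX shows up as ((unsigned)(120*100)). */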
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u32 tcp_rwnd_init_bpf(struct sock *sk)
{
int rwnd;

rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, ((void *)0));

if (rwnd < 0)
rwnd = 0;
return rwnd;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool tcp_bpf_ca_needs_ecn(struct sock *sk)
{
return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, ((void *)0)) == 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_bpf_rtt(struct sock *sk)
{
if ((tcp_sk(sk)->bpf_sock_ops_cb_flags & BPF_SOCK_OPS_RTT_CB_FLAG))
tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, ((void *)0));
}
# 2415 "./include/net/tcp.h"
extern struct static_key_false tcp_tx_delay_enabled;
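/* Annotation: the static_key_count() tests in the two helpers below are
 * almost certainly static_branch_unlikely(&tcp_tx_delay_enabled) after
 * expansion. */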
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tcp_add_tx_delay(struct sk_buff *skb,
const struct tcp_sock *tp)
{
if (__builtin_expect(!!(({ if (!__builtin_types_compatible_p(typeof(*&(&tcp_tx_delay_enabled)->key), struct static_key) && !__builtin_types_compatible_p(typeof(*&(&tcp_tx_delay_enabled)->key), struct static_key_true) && !__builtin_types_compatible_p(typeof(*&(&tcp_tx_delay_enabled)->key), struct static_key_false)) ____wrong_branch_error(); static_key_count((struct static_key *)&(&tcp_tx_delay_enabled)->key) > 0; })), 0))
skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * 1000L;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u64 tcp_transmit_time(const struct sock *sk)
{
if (__builtin_expect(!!(({ if (!__builtin_types_compatible_p(typeof(*&(&tcp_tx_delay_enabled)->key), struct static_key) && !__builtin_types_compatible_p(typeof(*&(&tcp_tx_delay_enabled)->key), struct static_key_true) && !__builtin_types_compatible_p(typeof(*&(&tcp_tx_delay_enabled)->key), struct static_key_false)) ____wrong_branch_error(); static_key_count((struct static_key *)&(&tcp_tx_delay_enabled)->key) > 0; })), 0)) {
u32 delay = (sk->__sk_common.skc_state == TCP_TIME_WAIT) ?
tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;

return tcp_clock_ns() + (u64)delay * 1000L;
}
return 0;
}
# 53 "net/ipv6/route.c" 2


# 1 "./include/net/dst_metadata.h" 1





# 1 "./include/net/ip_tunnels.h" 1




# 1 "./include/linux/if_tunnel.h" 1






# 1 "./include/uapi/linux/if_tunnel.h" 1
# 48 "./include/uapi/linux/if_tunnel.h"
struct ip_tunnel_parm {
char name[16];
int link;
__be16 i_flags;
__be16 o_flags;
__be32 i_key;
__be32 o_key;
struct iphdr iph;
};

enum {
IFLA_IPTUN_UNSPEC,
IFLA_IPTUN_LINK,
IFLA_IPTUN_LOCAL,
IFLA_IPTUN_REMOTE,
IFLA_IPTUN_TTL,
IFLA_IPTUN_TOS,
IFLA_IPTUN_ENCAP_LIMIT,
IFLA_IPTUN_FLOWINFO,
IFLA_IPTUN_FLAGS,
IFLA_IPTUN_PROTO,
IFLA_IPTUN_PMTUDISC,
IFLA_IPTUN_6RD_PREFIX,
IFLA_IPTUN_6RD_RELAY_PREFIX,
IFLA_IPTUN_6RD_PREFIXLEN,
IFLA_IPTUN_6RD_RELAY_PREFIXLEN,
IFLA_IPTUN_ENCAP_TYPE,
IFLA_IPTUN_ENCAP_FLAGS,
IFLA_IPTUN_ENCAP_SPORT,
IFLA_IPTUN_ENCAP_DPORT,
IFLA_IPTUN_COLLECT_METADATA,
IFLA_IPTUN_FWMARK,
__IFLA_IPTUN_MAX,
};


enum tunnel_encap_types {
TUNNEL_ENCAP_NONE,
TUNNEL_ENCAP_FOU,
TUNNEL_ENCAP_GUE,
TUNNEL_ENCAP_MPLS,
};
# 98 "./include/uapi/linux/if_tunnel.h"
struct ip_tunnel_prl {
__be32 addr;
__u16 flags;
__u16 __reserved;
__u32 datalen;
__u32 __reserved2;

};




struct ip_tunnel_6rd {
struct in6_addr prefix;
__be32 relay_prefix;
__u16 prefixlen;
__u16 relay_prefixlen;
};

enum {
IFLA_GRE_UNSPEC,
IFLA_GRE_LINK,
IFLA_GRE_IFLAGS,
IFLA_GRE_OFLAGS,
IFLA_GRE_IKEY,
IFLA_GRE_OKEY,
IFLA_GRE_LOCAL,
IFLA_GRE_REMOTE,
IFLA_GRE_TTL,
IFLA_GRE_TOS,
IFLA_GRE_PMTUDISC,
IFLA_GRE_ENCAP_LIMIT,
IFLA_GRE_FLOWINFO,
IFLA_GRE_FLAGS,
IFLA_GRE_ENCAP_TYPE,
IFLA_GRE_ENCAP_FLAGS,
IFLA_GRE_ENCAP_SPORT,
IFLA_GRE_ENCAP_DPORT,
IFLA_GRE_COLLECT_METADATA,
IFLA_GRE_IGNORE_DF,
IFLA_GRE_FWMARK,
IFLA_GRE_ERSPAN_INDEX,
IFLA_GRE_ERSPAN_VER,
IFLA_GRE_ERSPAN_DIR,
IFLA_GRE_ERSPAN_HWID,
__IFLA_GRE_MAX,
};






enum {
IFLA_VTI_UNSPEC,
IFLA_VTI_LINK,
IFLA_VTI_IKEY,
IFLA_VTI_OKEY,
IFLA_VTI_LOCAL,
IFLA_VTI_REMOTE,
IFLA_VTI_FWMARK,
__IFLA_VTI_MAX,
};
# 8 "./include/linux/if_tunnel.h" 2
# 6 "./include/net/ip_tunnels.h" 2








# 1 "./include/net/gro_cells.h" 1








struct gro_cell;

struct gro_cells {
struct gro_cell *cells;
};

int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb);
int gro_cells_init(struct gro_cells *gcells, struct net_device *dev);
void gro_cells_destroy(struct gro_cells *gcells);
# 15 "./include/net/ip_tunnels.h" 2

# 1 "./include/net/netns/generic.h" 1
# 28 "./include/net/netns/generic.h"
struct net_generic {
union {
struct {
unsigned int len;
struct callback_head rcu;
} s;

void *ptr[0];
};
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *net_generic(const struct net *net, unsigned int id)
{
struct net_generic *ng;
void *ptr;

rcu_read_lock();
ng = ({ typeof(*(net->gen)) *__UNIQUE_ID_rcu624 = (typeof(*(net->gen)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_625(void) ; if (!((sizeof((net->gen)) == sizeof(char) || sizeof((net->gen)) == sizeof(short) || sizeof((net->gen)) == sizeof(int) || sizeof((net->gen)) == sizeof(long)) || sizeof((net->gen)) == sizeof(long long))) __compiletime_assert_625(); } while (0); (*(const volatile typeof( _Generic(((net->gen)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((net->gen)))) *)&((net->gen))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(net->gen)) *)(__UNIQUE_ID_rcu624)); });
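/* Annotation: likely rcu_dereference(net->gen): a READ_ONCE()-style
 * volatile load plus an RCU lockdep check. */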
ptr = ng->ptr[id];
rcu_read_unlock();

return ptr;
}
# 17 "./include/net/ip_tunnels.h" 2


# 1 "./include/net/dst_cache.h" 1
# 11 "./include/net/dst_cache.h"
struct dst_cache {
struct dst_cache_pcpu *cache;
unsigned long reset_ts;
};
# 24 "./include/net/dst_cache.h"
struct dst_entry *dst_cache_get(struct dst_cache *dst_cache);
# 33 "./include/net/dst_cache.h"
struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr);
# 43 "./include/net/dst_cache.h"
void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst,
__be32 saddr);
# 56 "./include/net/dst_cache.h"
void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst,
const struct in6_addr *saddr);
# 66 "./include/net/dst_cache.h"
struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache,
struct in6_addr *saddr);
# 77 "./include/net/dst_cache.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void dst_cache_reset(struct dst_cache *dst_cache)
{
dst_cache->reset_ts = jiffies;
}
# 91 "./include/net/dst_cache.h"
void dst_cache_reset_now(struct dst_cache *dst_cache);






int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp);
# 107 "./include/net/dst_cache.h"
void dst_cache_destroy(struct dst_cache *dst_cache);
# 20 "./include/net/ip_tunnels.h" 2
# 39 "./include/net/ip_tunnels.h"
struct ip_tunnel_key {
__be64 tun_id;
union {
struct {
__be32 src;
__be32 dst;
} ipv4;
struct {
struct in6_addr src;
struct in6_addr dst;
} ipv6;
} u;
__be16 tun_flags;
u8 tos;
u8 ttl;
__be32 label;
__be16 tp_src;
__be16 tp_dst;
};
# 69 "./include/net/ip_tunnels.h"
struct ip_tunnel_info {
struct ip_tunnel_key key;

struct dst_cache dst_cache;

u8 options_len;
u8 mode;
};
# 88 "./include/net/ip_tunnels.h"
struct ip_tunnel_encap {
u16 type;
u16 flags;
__be16 sport;
__be16 dport;
};

struct ip_tunnel_prl_entry {
struct ip_tunnel_prl_entry *next;
__be32 addr;
u16 flags;
struct callback_head callback_head;
};

struct metadata_dst;

struct ip_tunnel {
struct ip_tunnel *next;
struct hlist_node hash_node;

struct net_device *dev;
netdevice_tracker dev_tracker;

struct net *net;

unsigned long err_time;

int err_count;


u32 i_seqno;
atomic_t o_seqno;
int tun_hlen;


u32 index;
u8 erspan_ver;
u8 dir;
u16 hwid;

struct dst_cache dst_cache;

struct ip_tunnel_parm parms;

int mlink;
int encap_hlen;
int hlen;
struct ip_tunnel_encap encap;





struct ip_tunnel_prl_entry *prl;
unsigned int prl_count;
unsigned int ip_tnl_net_id;
struct gro_cells gro_cells;
__u32 fwmark;
bool collect_md;
bool ignore_df;
};

struct tnl_ptk_info {
__be16 flags;
__be16 proto;
__be32 key;
__be32 seq;
int hdr_len;
};
# 165 "./include/net/ip_tunnels.h"
struct ip_tunnel_net {
struct net_device *fb_tunnel_dev;
struct rtnl_link_ops *rtnl_link_ops;
struct hlist_head tunnels[(1 << 7)];
struct ip_tunnel *collect_md_tun;
int type;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip_tunnel_key_init(struct ip_tunnel_key *key,
__be32 saddr, __be32 daddr,
u8 tos, u8 ttl, __be32 label,
__be16 tp_src, __be16 tp_dst,
__be64 tun_id, __be16 tun_flags)
{
key->tun_id = tun_id;
key->u.ipv4.src = saddr;
key->u.ipv4.dst = daddr;
memset((unsigned char *)key + (__builtin_offsetof(struct ip_tunnel_key, u.ipv4.dst) + sizeof((((struct ip_tunnel_key *)0)->u.ipv4.dst))),
0, (sizeof((((struct ip_tunnel_key *)0)->u)) - sizeof((((struct ip_tunnel_key *)0)->u.ipv4))));
key->tos = tos;
key->ttl = ttl;
key->label = label;
key->tun_flags = tun_flags;





key->tp_src = tp_src;
key->tp_dst = tp_dst;


if (sizeof(*key) != (__builtin_offsetof(struct ip_tunnel_key, tp_dst) + sizeof((((struct ip_tunnel_key *)0)->tp_dst))))
memset((unsigned char *)key + (__builtin_offsetof(struct ip_tunnel_key, tp_dst) + sizeof((((struct ip_tunnel_key *)0)->tp_dst))),
0, sizeof(*key) - (__builtin_offsetof(struct ip_tunnel_key, tp_dst) + sizeof((((struct ip_tunnel_key *)0)->tp_dst))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
const struct ip_tunnel_info *info)
{
if (skb->mark)
return false;
if (!info)
return true;
if (info->key.tun_flags & (( __be16)(__builtin_constant_p((__u16)((0x2000))) ? ((__u16)( (((__u16)((0x2000)) & (__u16)0x00ffU) << 8) | (((__u16)((0x2000)) & (__u16)0xff00U) >> 8))) : __fswab16((0x2000)))))
return false;
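/* Annotation: the flag tested above, htons(0x2000), presumably corresponds
 * to TUNNEL_NOCACHE. */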

return true;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
*tun_info)
{
return tun_info->mode & 0x02 ? 10 : 2;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be64 key32_to_tunnel_id(__be32 key)
{



return ( __be64)(( u64)key << 32);

}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __be32 tunnel_id_to_key32(__be64 tun_id)
{



return ( __be32)(( u64)tun_id >> 32);

}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip_tunnel_init_flow(struct flowi4 *fl4,
int proto,
__be32 daddr, __be32 saddr,
__be32 key, __u8 tos,
struct net *net, int oif,
__u32 mark, __u32 tun_inner_hash)
{
memset(fl4, 0, sizeof(*fl4));

if (oif) {
fl4->__fl_common.flowic_l3mdev = l3mdev_master_upper_ifindex_by_index_rcu(net, oif);

fl4->__fl_common.flowic_oif = fl4->__fl_common.flowic_l3mdev ? 0 : oif;
}

fl4->daddr = daddr;
fl4->saddr = saddr;
fl4->__fl_common.flowic_tos = tos;
fl4->__fl_common.flowic_proto = proto;
fl4->uli.gre_key = key;
fl4->__fl_common.flowic_mark = mark;
fl4->__fl_common.flowic_multipath_hash = tun_inner_hash;
}

int ip_tunnel_init(struct net_device *dev);
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
struct net *ip_tunnel_get_link_net(const struct net_device *dev);
int ip_tunnel_get_iflink(const struct net_device *dev);
int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname);

void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id,
struct rtnl_link_ops *ops);

void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const u8 proto, int tunnel_hlen);
int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
void *data, int cmd);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);

struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
int link, __be16 flags,
__be32 remote, __be32 local,
__be32 key);

int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
bool log_ecn_error);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p, __u32 fwmark);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p, __u32 fwmark);
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);

extern const struct header_ops ip_tunnel_header_ops;
__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);

struct ip_tunnel_encap_ops {
size_t (*encap_hlen)(struct ip_tunnel_encap *e);
int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
u8 *protocol, struct flowi4 *fl4);
int (*err_handler)(struct sk_buff *skb, u32 info);
};



extern const struct ip_tunnel_encap_ops *
iptun_encaps[8];

int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
unsigned int num);
int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
unsigned int num);

int ip_tunnel_encap_setup(struct ip_tunnel *t,
struct ip_tunnel_encap *ipencap);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool pskb_inet_may_pull(struct sk_buff *skb)
{
int nhlen;

switch (skb->protocol) {

case (( __be16)(__builtin_constant_p((__u16)((0x86DD))) ? ((__u16)( (((__u16)((0x86DD)) & (__u16)0x00ffU) << 8) | (((__u16)((0x86DD)) & (__u16)0xff00U) >> 8))) : __fswab16((0x86DD)))):
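/* Annotation: 0x86DD above and 0x0800 below are ETH_P_IPV6 / ETH_P_IP; the
 * __builtin_constant_p() ternaries are compile-time-folded htons(). */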
nhlen = sizeof(struct ipv6hdr);
break;

case (( __be16)(__builtin_constant_p((__u16)((0x0800))) ? ((__u16)( (((__u16)((0x0800)) & (__u16)0x00ffU) << 8) | (((__u16)((0x0800)) & (__u16)0xff00U) >> 8))) : __fswab16((0x0800)))):
nhlen = sizeof(struct iphdr);
break;
default:
nhlen = 0;
}

return pskb_network_may_pull(skb, nhlen);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ip_encap_hlen(struct ip_tunnel_encap *e)
{
const struct ip_tunnel_encap_ops *ops;
int hlen = -22;

if (e->type == TUNNEL_ENCAP_NONE)
return 0;

if (e->type >= 8)
return -22;

rcu_read_lock();
ops = ({ typeof(*(iptun_encaps[e->type])) *__UNIQUE_ID_rcu626 = (typeof(*(iptun_encaps[e->type])) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_627(void) ; if (!((sizeof((iptun_encaps[e->type])) == sizeof(char) || sizeof((iptun_encaps[e->type])) == sizeof(short) || sizeof((iptun_encaps[e->type])) == sizeof(int) || sizeof((iptun_encaps[e->type])) == sizeof(long)) || sizeof((iptun_encaps[e->type])) == sizeof(long long))) __compiletime_assert_627(); } while (0); (*(const volatile typeof( _Generic(((iptun_encaps[e->type])), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((iptun_encaps[e->type])))) *)&((iptun_encaps[e->type]))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(iptun_encaps[e->type])) *)(__UNIQUE_ID_rcu626)); });
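/* Annotation: likely rcu_dereference(iptun_encaps[e->type]); the same
 * pattern repeats in ip_tunnel_encap() below. */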
if (__builtin_expect(!!(ops && ops->encap_hlen), 1))
hlen = ops->encap_hlen(e);
rcu_read_unlock();

return hlen;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
u8 *protocol, struct flowi4 *fl4)
{
const struct ip_tunnel_encap_ops *ops;
int ret = -22;

if (t->encap.type == TUNNEL_ENCAP_NONE)
return 0;

if (t->encap.type >= 8)
return -22;

rcu_read_lock();
ops = ({ typeof(*(iptun_encaps[t->encap.type])) *__UNIQUE_ID_rcu628 = (typeof(*(iptun_encaps[t->encap.type])) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_629(void) ; if (!((sizeof((iptun_encaps[t->encap.type])) == sizeof(char) || sizeof((iptun_encaps[t->encap.type])) == sizeof(short) || sizeof((iptun_encaps[t->encap.type])) == sizeof(int) || sizeof((iptun_encaps[t->encap.type])) == sizeof(long)) || sizeof((iptun_encaps[t->encap.type])) == sizeof(long long))) __compiletime_assert_629(); } while (0); (*(const volatile typeof( _Generic(((iptun_encaps[t->encap.type])), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((iptun_encaps[t->encap.type])))) *)&((iptun_encaps[t->encap.type]))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(iptun_encaps[t->encap.type])) *)(__UNIQUE_ID_rcu628)); });
if (__builtin_expect(!!(ops && ops->build_header), 1))
ret = ops->build_header(skb, &t->encap, protocol, fl4);
rcu_read_unlock();

return ret;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
const struct sk_buff *skb)
{
if (skb->protocol == (( __be16)(__builtin_constant_p((__u16)((0x0800))) ? ((__u16)( (((__u16)((0x0800)) & (__u16)0x00ffU) << 8) | (((__u16)((0x0800)) & (__u16)0xff00U) >> 8))) : __fswab16((0x0800)))))
return iph->tos;
else if (skb->protocol == (( __be16)(__builtin_constant_p((__u16)((0x86DD))) ? ((__u16)( (((__u16)((0x86DD)) & (__u16)0x00ffU) << 8) | (((__u16)((0x86DD)) & (__u16)0xff00U) >> 8))) : __fswab16((0x86DD)))))
return ipv6_get_dsfield((const struct ipv6hdr *)iph);
else
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u8 ip_tunnel_get_ttl(const struct iphdr *iph,
const struct sk_buff *skb)
{
if (skb->protocol == (( __be16)(__builtin_constant_p((__u16)((0x0800))) ? ((__u16)( (((__u16)((0x0800)) & (__u16)0x00ffU) << 8) | (((__u16)((0x0800)) & (__u16)0xff00U) >> 8))) : __fswab16((0x0800)))))
return iph->ttl;
else if (skb->protocol == (( __be16)(__builtin_constant_p((__u16)((0x86DD))) ? ((__u16)( (((__u16)((0x86DD)) & (__u16)0x00ffU) << 8) | (((__u16)((0x86DD)) & (__u16)0xff00U) >> 8))) : __fswab16((0x86DD)))))
return ((const struct ipv6hdr *)iph)->hop_limit;
else
return 0;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
const struct sk_buff *skb)
{
u8 inner = ip_tunnel_get_dsfield(iph, skb);

return INET_ECN_encapsulate(tos, inner);
}

int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
__be16 inner_proto, bool raw_proto, bool xnet);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
__be16 inner_proto, bool xnet)
{
return __iptunnel_pull_header(skb, hdr_len, inner_proto, false, xnet);
}

void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, u8 proto,
u8 tos, u8 ttl, __be16 df, bool xnet);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
gfp_t flags);
int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
int headroom, bool reply);

int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int iptunnel_pull_offloads(struct sk_buff *skb)
{
if (skb_is_gso(skb)) {
int err;

err = skb_unclone(skb, ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)));
if (__builtin_expect(!!(err), 0))
return err;
((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_type &= ~((((netdev_features_t)1 << (NETIF_F_GSO_GRE_BIT)) | ((netdev_features_t)1 << (NETIF_F_GSO_GRE_CSUM_BIT)) | ((netdev_features_t)1 << (NETIF_F_GSO_IPXIP4_BIT)) | ((netdev_features_t)1 << (NETIF_F_GSO_IPXIP6_BIT)) | ((netdev_features_t)1 << (NETIF_F_GSO_UDP_TUNNEL_BIT)) | ((netdev_features_t)1 << (NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT))) >>
NETIF_F_GSO_SHIFT);
}

skb->encapsulation = 0;
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
{
if (pkt_len > 0) {
struct pcpu_sw_netstats *tstats = ({ do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); ({ do { const void *__vpp_verify = (typeof((dev->tstats) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(dev->tstats)) *)(dev->tstats)); (typeof((typeof(*(dev->tstats)) *)(dev->tstats))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); });

u64_stats_update_begin(&tstats->syncp);
tstats->tx_bytes += pkt_len;
tstats->tx_packets++;
u64_stats_update_end(&tstats->syncp);
do { (void)(tstats); do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0);
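/* Annotation: the surrounding preempt_count manipulation looks like
 * get_cpu_ptr(dev->tstats) / put_cpu_ptr() bracketing the u64 stats
 * update. */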
} else {
struct net_device_stats *err_stats = &dev->stats;

if (pkt_len < 0) {
err_stats->tx_errors++;
err_stats->tx_aborted_errors++;
} else {
err_stats->tx_dropped++;
}
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *ip_tunnel_info_opts(struct ip_tunnel_info *info)
{
return info + 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip_tunnel_info_opts_get(void *to,
const struct ip_tunnel_info *info)
{
memcpy(to, info + 1, info->options_len);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
const void *from, int len,
__be16 flags)
{
info->options_len = len;
if (len > 0) {
memcpy(ip_tunnel_info_opts(info), from, len);
info->key.tun_flags |= flags;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
return (struct ip_tunnel_info *)lwtstate->data;
}

extern struct static_key_false ip_tunnel_metadata_cnt;


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ip_tunnel_collect_metadata(void)
{
return __builtin_expect(!!(({ if (!__builtin_types_compatible_p(typeof(*&(&ip_tunnel_metadata_cnt)->key), struct static_key) && !__builtin_types_compatible_p(typeof(*&(&ip_tunnel_metadata_cnt)->key), struct static_key_true) && !__builtin_types_compatible_p(typeof(*&(&ip_tunnel_metadata_cnt)->key), struct static_key_false)) ____wrong_branch_error(); static_key_count((struct static_key *)&(&ip_tunnel_metadata_cnt)->key) > 0; })), 0);
}

void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) ip_tunnel_core_init(void);

void ip_tunnel_need_metadata(void);
void ip_tunnel_unneed_metadata(void);
# 7 "./include/net/dst_metadata.h" 2


enum metadata_type {
METADATA_IP_TUNNEL,
METADATA_HW_PORT_MUX,
};

struct hw_port_info {
struct net_device *lower_dev;
u32 port_id;
};

struct metadata_dst {
struct dst_entry dst;
enum metadata_type type;
union {
struct ip_tunnel_info tun_info;
struct hw_port_info port_info;
} u;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct metadata_dst *skb_metadata_dst(const struct sk_buff *skb)
{
struct metadata_dst *md_dst = (struct metadata_dst *) skb_dst(skb);

if (md_dst && md_dst->dst.flags & 0x0080)
return md_dst;

return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct ip_tunnel_info *
skb_tunnel_info(const struct sk_buff *skb)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
struct dst_entry *dst;

if (md_dst && md_dst->type == METADATA_IP_TUNNEL)
return &md_dst->u.tun_info;

dst = skb_dst(skb);
if (dst && dst->lwtstate &&
(dst->lwtstate->type == LWTUNNEL_ENCAP_IP ||
dst->lwtstate->type == LWTUNNEL_ENCAP_IP6))
return lwt_tun_info(dst->lwtstate);

return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool skb_valid_dst(const struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);

return dst && !(dst->flags & 0x0080);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
const struct sk_buff *skb_b)
{
const struct metadata_dst *a, *b;

if (!(skb_a->_skb_refdst | skb_b->_skb_refdst))
return 0;

a = (const struct metadata_dst *) skb_dst(skb_a);
b = (const struct metadata_dst *) skb_dst(skb_b);

if (!a != !b || a->type != b->type)
return 1;

switch (a->type) {
case METADATA_HW_PORT_MUX:
return memcmp(&a->u.port_info, &b->u.port_info,
sizeof(a->u.port_info));
case METADATA_IP_TUNNEL:
return memcmp(&a->u.tun_info, &b->u.tun_info,
sizeof(a->u.tun_info) +
a->u.tun_info.options_len);
default:
return 1;
}
}

void metadata_dst_free(struct metadata_dst *);
struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
gfp_t flags);
void metadata_dst_free_percpu(struct metadata_dst *md_dst);
struct metadata_dst *
metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct metadata_dst *tun_rx_dst(int md_size)
{
struct metadata_dst *tun_dst;

tun_dst = metadata_dst_alloc(md_size, METADATA_IP_TUNNEL, ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)));
if (!tun_dst)
return ((void *)0);

tun_dst->u.tun_info.options_len = 0;
tun_dst->u.tun_info.mode = 0;
return tun_dst;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
int md_size;
struct metadata_dst *new_md;

if (!md_dst || md_dst->type != METADATA_IP_TUNNEL)
return ERR_PTR(-22);

md_size = md_dst->u.tun_info.options_len;
new_md = metadata_dst_alloc(md_size, METADATA_IP_TUNNEL, ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)));
if (!new_md)
return ERR_PTR(-12);

memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
sizeof(struct ip_tunnel_info) + md_size);


if (new_md->u.tun_info.dst_cache.cache) {
int ret;

ret = dst_cache_init(&new_md->u.tun_info.dst_cache, ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)));
if (ret) {
metadata_dst_free(new_md);
return ERR_PTR(ret);
}
}


skb_dst_drop(skb);
skb_dst_set(skb, &new_md->dst);
return new_md;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb)
{
struct metadata_dst *dst;

dst = tun_dst_unclone(skb);
if (IS_ERR(dst))
return ((void *)0);

return &dst->u.tun_info;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct metadata_dst *__ip_tun_set_dst(__be32 saddr,
__be32 daddr,
__u8 tos, __u8 ttl,
__be16 tp_dst,
__be16 flags,
__be64 tunnel_id,
int md_size)
{
struct metadata_dst *tun_dst;

tun_dst = tun_rx_dst(md_size);
if (!tun_dst)
return ((void *)0);

ip_tunnel_key_init(&tun_dst->u.tun_info.key,
saddr, daddr, tos, ttl,
0, 0, tp_dst, tunnel_id, flags);
return tun_dst;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
__be16 flags,
__be64 tunnel_id,
int md_size)
{
const struct iphdr *iph = ip_hdr(skb);

return __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl,
0, flags, tunnel_id, md_size);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u8 tos, __u8 ttl,
__be16 tp_dst,
__be32 label,
__be16 flags,
__be64 tunnel_id,
int md_size)
{
struct metadata_dst *tun_dst;
struct ip_tunnel_info *info;

tun_dst = tun_rx_dst(md_size);
if (!tun_dst)
return ((void *)0);

info = &tun_dst->u.tun_info;
info->mode = 0x02;
info->key.tun_flags = flags;
info->key.tun_id = tunnel_id;
info->key.tp_src = 0;
info->key.tp_dst = tp_dst;

info->key.u.ipv6.src = *saddr;
info->key.u.ipv6.dst = *daddr;

info->key.tos = tos;
info->key.ttl = ttl;
info->key.label = label;

return tun_dst;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
__be16 flags,
__be64 tunnel_id,
int md_size)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);

return __ipv6_tun_set_dst(&ip6h->saddr, &ip6h->daddr,
ipv6_get_dsfield(ip6h), ip6h->hop_limit,
0, ip6_flowlabel(ip6h), flags, tunnel_id,
md_size);
}
# 56 "net/ipv6/route.c" 2
# 1 "./include/net/xfrm.h" 1
# 11 "./include/net/xfrm.h"
# 1 "./include/uapi/linux/pfkeyv2.h" 1
# 15 "./include/uapi/linux/pfkeyv2.h"
struct sadb_msg {
__u8 sadb_msg_version;
__u8 sadb_msg_type;
__u8 sadb_msg_errno;
__u8 sadb_msg_satype;
__u16 sadb_msg_len;
__u16 sadb_msg_reserved;
__u32 sadb_msg_seq;
__u32 sadb_msg_pid;
} __attribute__((packed));


struct sadb_ext {
__u16 sadb_ext_len;
__u16 sadb_ext_type;
} __attribute__((packed));


struct sadb_sa {
__u16 sadb_sa_len;
__u16 sadb_sa_exttype;
__be32 sadb_sa_spi;
__u8 sadb_sa_replay;
__u8 sadb_sa_state;
__u8 sadb_sa_auth;
__u8 sadb_sa_encrypt;
__u32 sadb_sa_flags;
} __attribute__((packed));


struct sadb_lifetime {
__u16 sadb_lifetime_len;
__u16 sadb_lifetime_exttype;
__u32 sadb_lifetime_allocations;
__u64 sadb_lifetime_bytes;
__u64 sadb_lifetime_addtime;
__u64 sadb_lifetime_usetime;
} __attribute__((packed));


struct sadb_address {
__u16 sadb_address_len;
__u16 sadb_address_exttype;
__u8 sadb_address_proto;
__u8 sadb_address_prefixlen;
__u16 sadb_address_reserved;
} __attribute__((packed));


struct sadb_key {
__u16 sadb_key_len;
__u16 sadb_key_exttype;
__u16 sadb_key_bits;
__u16 sadb_key_reserved;
} __attribute__((packed));


struct sadb_ident {
__u16 sadb_ident_len;
__u16 sadb_ident_exttype;
__u16 sadb_ident_type;
__u16 sadb_ident_reserved;
__u64 sadb_ident_id;
} __attribute__((packed));


struct sadb_sens {
__u16 sadb_sens_len;
__u16 sadb_sens_exttype;
__u32 sadb_sens_dpd;
__u8 sadb_sens_sens_level;
__u8 sadb_sens_sens_len;
__u8 sadb_sens_integ_level;
__u8 sadb_sens_integ_len;
__u32 sadb_sens_reserved;
} __attribute__((packed));






struct sadb_prop {
__u16 sadb_prop_len;
__u16 sadb_prop_exttype;
__u8 sadb_prop_replay;
__u8 sadb_prop_reserved[3];
} __attribute__((packed));







struct sadb_comb {
__u8 sadb_comb_auth;
__u8 sadb_comb_encrypt;
__u16 sadb_comb_flags;
__u16 sadb_comb_auth_minbits;
__u16 sadb_comb_auth_maxbits;
__u16 sadb_comb_encrypt_minbits;
__u16 sadb_comb_encrypt_maxbits;
__u32 sadb_comb_reserved;
__u32 sadb_comb_soft_allocations;
__u32 sadb_comb_hard_allocations;
__u64 sadb_comb_soft_bytes;
__u64 sadb_comb_hard_bytes;
__u64 sadb_comb_soft_addtime;
__u64 sadb_comb_hard_addtime;
__u64 sadb_comb_soft_usetime;
__u64 sadb_comb_hard_usetime;
} __attribute__((packed));


struct sadb_supported {
__u16 sadb_supported_len;
__u16 sadb_supported_exttype;
__u32 sadb_supported_reserved;
} __attribute__((packed));







struct sadb_alg {
__u8 sadb_alg_id;
__u8 sadb_alg_ivlen;
__u16 sadb_alg_minbits;
__u16 sadb_alg_maxbits;
__u16 sadb_alg_reserved;
} __attribute__((packed));


struct sadb_spirange {
__u16 sadb_spirange_len;
__u16 sadb_spirange_exttype;
__u32 sadb_spirange_min;
__u32 sadb_spirange_max;
__u32 sadb_spirange_reserved;
} __attribute__((packed));


struct sadb_x_kmprivate {
__u16 sadb_x_kmprivate_len;
__u16 sadb_x_kmprivate_exttype;
__u32 sadb_x_kmprivate_reserved;
} __attribute__((packed));


struct sadb_x_sa2 {
__u16 sadb_x_sa2_len;
__u16 sadb_x_sa2_exttype;
__u8 sadb_x_sa2_mode;
__u8 sadb_x_sa2_reserved1;
__u16 sadb_x_sa2_reserved2;
__u32 sadb_x_sa2_sequence;
__u32 sadb_x_sa2_reqid;
} __attribute__((packed));


struct sadb_x_policy {
__u16 sadb_x_policy_len;
__u16 sadb_x_policy_exttype;
__u16 sadb_x_policy_type;
__u8 sadb_x_policy_dir;
__u8 sadb_x_policy_reserved;
__u32 sadb_x_policy_id;
__u32 sadb_x_policy_priority;
} __attribute__((packed));


struct sadb_x_ipsecrequest {
__u16 sadb_x_ipsecrequest_len;
__u16 sadb_x_ipsecrequest_proto;
__u8 sadb_x_ipsecrequest_mode;
__u8 sadb_x_ipsecrequest_level;
__u16 sadb_x_ipsecrequest_reserved1;
__u32 sadb_x_ipsecrequest_reqid;
__u32 sadb_x_ipsecrequest_reserved2;
} __attribute__((packed));





struct sadb_x_nat_t_type {
__u16 sadb_x_nat_t_type_len;
__u16 sadb_x_nat_t_type_exttype;
__u8 sadb_x_nat_t_type_type;
__u8 sadb_x_nat_t_type_reserved[3];
} __attribute__((packed));



struct sadb_x_nat_t_port {
__u16 sadb_x_nat_t_port_len;
__u16 sadb_x_nat_t_port_exttype;
__be16 sadb_x_nat_t_port_port;
__u16 sadb_x_nat_t_port_reserved;
} __attribute__((packed));



struct sadb_x_sec_ctx {
__u16 sadb_x_sec_len;
__u16 sadb_x_sec_exttype;
__u8 sadb_x_ctx_alg;
__u8 sadb_x_ctx_doi;
__u16 sadb_x_ctx_len;
} __attribute__((packed));




struct sadb_x_kmaddress {
__u16 sadb_x_kmaddress_len;
__u16 sadb_x_kmaddress_exttype;
__u32 sadb_x_kmaddress_reserved;
} __attribute__((packed));



struct sadb_x_filter {
__u16 sadb_x_filter_len;
__u16 sadb_x_filter_exttype;
__u32 sadb_x_filter_saddr[4];
__u32 sadb_x_filter_daddr[4];
__u16 sadb_x_filter_family;
__u8 sadb_x_filter_splen;
__u8 sadb_x_filter_dplen;
} __attribute__((packed));
# 12 "./include/net/xfrm.h" 2
# 1 "./include/uapi/linux/ipsec.h" 1
# 13 "./include/uapi/linux/ipsec.h"
enum {
IPSEC_MODE_ANY = 0,
IPSEC_MODE_TRANSPORT = 1,
IPSEC_MODE_TUNNEL = 2,
IPSEC_MODE_BEET = 3
};

enum {
IPSEC_DIR_ANY = 0,
IPSEC_DIR_INBOUND = 1,
IPSEC_DIR_OUTBOUND = 2,
IPSEC_DIR_FWD = 3,
IPSEC_DIR_MAX = 4,
IPSEC_DIR_INVALID = 5
};

enum {
IPSEC_POLICY_DISCARD = 0,
IPSEC_POLICY_NONE = 1,
IPSEC_POLICY_IPSEC = 2,
IPSEC_POLICY_ENTRUST = 3,
IPSEC_POLICY_BYPASS = 4
};

enum {
IPSEC_LEVEL_DEFAULT = 0,
IPSEC_LEVEL_USE = 1,
IPSEC_LEVEL_REQUIRE = 2,
IPSEC_LEVEL_UNIQUE = 3
};
# 13 "./include/net/xfrm.h" 2


# 1 "./include/linux/audit.h" 1
# 13 "./include/linux/audit.h"
# 1 "./include/linux/ptrace.h" 1
# 10 "./include/linux/ptrace.h"
# 1 "./include/linux/pid_namespace.h" 1
# 17 "./include/linux/pid_namespace.h"
struct fs_pin;

struct pid_namespace {
struct idr idr;
struct callback_head rcu;
unsigned int pid_allocated;
struct task_struct *child_reaper;
struct kmem_cache *pid_cachep;
unsigned int level;
struct pid_namespace *parent;



struct user_namespace *user_ns;
struct ucounts *ucounts;
int reboot;
struct ns_common ns;
} ;

extern struct pid_namespace init_pid_ns;




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
{
if (ns != &init_pid_ns)
refcount_inc(&ns->ns.count);
return ns;
}

extern struct pid_namespace *copy_pid_ns(unsigned long flags,
struct user_namespace *user_ns, struct pid_namespace *ns);
extern void zap_pid_ns_processes(struct pid_namespace *pid_ns);
extern int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd);
extern void put_pid_ns(struct pid_namespace *ns);
# 85 "./include/linux/pid_namespace.h"
extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk);
void pidhash_init(void);
void pid_idr_init(void);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool task_is_in_init_pid_ns(struct task_struct *tsk)
{
return task_active_pid_ns(tsk) == &init_pid_ns;
}
# 11 "./include/linux/ptrace.h" 2
# 1 "./include/uapi/linux/ptrace.h" 1
# 59 "./include/uapi/linux/ptrace.h"
struct ptrace_peeksiginfo_args {
__u64 off;
__u32 flags;
__s32 nr;
};







struct seccomp_metadata {
__u64 filter_off;
__u64 flags;
};







struct ptrace_syscall_info {
__u8 op;
__u8 pad[3];
__u32 arch;
__u64 instruction_pointer;
__u64 stack_pointer;
union {
struct {
__u64 nr;
__u64 args[6];
} entry;
struct {
__s64 rval;
__u8 is_error;
} exit;
struct {
__u64 nr;
__u64 args[6];
__u32 ret_data;
} seccomp;
};
};



struct ptrace_rseq_configuration {
__u64 rseq_abi_pointer;
__u32 rseq_abi_size;
__u32 signature;
__u32 flags;
__u32 pad;
};
# 12 "./include/linux/ptrace.h" 2



struct syscall_info {
__u64 sp;
struct seccomp_data data;
};

extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
# 56 "./include/linux/ptrace.h"
extern long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char *src, unsigned long dst, int len);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_request(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
extern int ptrace_notify(int exit_code, unsigned long message);
extern void __ptrace_link(struct task_struct *child,
struct task_struct *new_parent,
const struct cred *ptracer_cred);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
# 95 "./include/linux/ptrace.h"
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ptrace_reparented(struct task_struct *child)
{
return !same_thread_group(child->real_parent, child->parent);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ptrace_unlink(struct task_struct *child)
{
if (__builtin_expect(!!(child->ptrace), 0))
__ptrace_unlink(child);
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
unsigned long data);
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
unsigned long data);
# 124 "./include/linux/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct task_struct *ptrace_parent(struct task_struct *task)
{
if (__builtin_expect(!!(task->ptrace), 0))
return ({ typeof(*(task->parent)) *__UNIQUE_ID_rcu630 = (typeof(*(task->parent)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_631(void) ; if (!((sizeof((task->parent)) == sizeof(char) || sizeof((task->parent)) == sizeof(short) || sizeof((task->parent)) == sizeof(int) || sizeof((task->parent)) == sizeof(long)) || sizeof((task->parent)) == sizeof(long long))) __compiletime_assert_631(); } while (0); (*(const volatile typeof( _Generic(((task->parent)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((task->parent)))) *)&((task->parent))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(task->parent)) *)(__UNIQUE_ID_rcu630)); });
return ((void *)0);
}
# 140 "./include/linux/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ptrace_event_enabled(struct task_struct *task, int event)
{
return task->ptrace & (1 << (3 + (event)));
}
# 155 "./include/linux/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ptrace_event(int event, unsigned long message)
{
if (__builtin_expect(!!(ptrace_event_enabled(get_current(), event)), 0)) {
ptrace_notify((event << 8) | 5, message);
} else if (event == 4) {

if ((get_current()->ptrace & (0x00000001|0x00010000)) == 0x00000001)
send_sig(5, get_current(), 0);
}
}
# 177 "./include/linux/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ptrace_event_pid(int event, struct pid *pid)
{






unsigned long message = 0;
struct pid_namespace *ns;

rcu_read_lock();
ns = task_active_pid_ns(({ typeof(*(get_current()->parent)) *__UNIQUE_ID_rcu632 = (typeof(*(get_current()->parent)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_633(void) ; if (!((sizeof((get_current()->parent)) == sizeof(char) || sizeof((get_current()->parent)) == sizeof(short) || sizeof((get_current()->parent)) == sizeof(int) || sizeof((get_current()->parent)) == sizeof(long)) || sizeof((get_current()->parent)) == sizeof(long long))) __compiletime_assert_633(); } while (0); (*(const volatile typeof( _Generic(((get_current()->parent)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((get_current()->parent)))) *)&((get_current()->parent))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(get_current()->parent)) *)(__UNIQUE_ID_rcu632)); }));
if (ns)
message = pid_nr_ns(pid, ns);
rcu_read_unlock();

ptrace_event(event, message);
}
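/*
 * The argument to task_active_pid_ns() above is, pre-expansion,
 * rcu_dereference(get_current()->parent) (the same assert-plus-volatile-load
 * pattern as in ptrace_parent()), used to translate @pid into the tracer's
 * pid namespace.
 */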
# 207 "./include/linux/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ptrace_init_task(struct task_struct *child, bool ptrace)
{
INIT_LIST_HEAD(&child->ptrace_entry);
INIT_LIST_HEAD(&child->ptraced);
child->jobctl = 0;
child->ptrace = 0;
child->parent = child->real_parent;

if (__builtin_expect(!!(ptrace), 0) && get_current()->ptrace) {
child->ptrace = get_current()->ptrace;
__ptrace_link(child, get_current()->parent, get_current()->ptracer_cred);

if (child->ptrace & 0x00010000)
task_set_jobctl_pending(child, (1UL << 19));
else
sigaddset(&child->pending.signal, 19);
}
else
child->ptracer_cred = ((void *)0);
}
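/*
 * Constants above: 0x00010000 is PT_SEIZED, (1UL << 19) is JOBCTL_TRAP_STOP
 * (bit 19), and the 19 in sigaddset() is SIGSTOP: a SEIZE-attached child gets
 * a trap pending, a legacy-attached child gets a pending SIGSTOP.
 */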







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ptrace_release_task(struct task_struct *task)
{
do { if (__builtin_expect(!!(!list_empty(&task->ptraced)), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/ptrace.h"), "i" (236), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
ptrace_unlink(task);
do { if (__builtin_expect(!!(!list_empty(&task->ptrace_entry)), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/ptrace.h"), "i" (238), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
}
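/*
 * The two do/while blocks above are the RISC-V expansion of
 * BUG_ON(!list_empty(...)): an ebreak plus a __bug_table record carrying the
 * header file name and line numbers (236 and 238).
 */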
# 294 "./include/linux/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void user_enable_single_step(struct task_struct *task)
{
do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/ptrace.h"), "i" (296), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0);
}
# 308 "./include/linux/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void user_disable_single_step(struct task_struct *task)
{
}
# 337 "./include/linux/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void user_enable_block_step(struct task_struct *task)
{
do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/ptrace.h"), "i" (339), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0);
}
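/*
 * user_enable_single_step() and user_enable_block_step() expand to BUG();
 * this config apparently selects neither arch_has_single_step() nor
 * arch_has_block_step(), so the generic ptrace.h fallbacks above apply,
 * while user_disable_single_step() falls back to a no-op.
 */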







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void user_single_step_report(struct pt_regs *regs)
{
kernel_siginfo_t info;
clear_siginfo(&info);
info.si_signo = 5;
info.si_errno = 0;
info.si_code = 0;
info._sifields._kill._pid = 0;
info._sifields._kill._uid = 0;
force_sig_info(&info);
}
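/*
 * Generic user_single_step_report(): si_signo 5 is SIGTRAP and si_code 0 is
 * SI_USER; a forced SIGTRAP with an otherwise empty kill-style siginfo
 * reports the completed single step.
 */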
# 412 "./include/linux/ptrace.h"
extern int task_current_syscall(struct task_struct *target, struct syscall_info *info);

extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ptrace_report_syscall(unsigned long message)
{
int ptrace = get_current()->ptrace;
int signr;

if (!(ptrace & 0x00000001))
return 0;

signr = ptrace_notify(5 | ((ptrace & (1 << (3 + (0)))) ? 0x80 : 0),
message);






if (signr)
send_sig(signr, get_current(), 1);

return fatal_signal_pending(get_current());
}
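/*
 * Pre-expansion: 0x00000001 is PT_PTRACED, and the first ptrace_notify()
 * argument is SIGTRAP | (PT_TRACESYSGOOD ? 0x80 : 0), where (1 << (3 + (0)))
 * is PT_EVENT_FLAG(0) == PT_TRACESYSGOOD. The 0x80 bit lets tracers using
 * PTRACE_O_TRACESYSGOOD distinguish syscall stops from genuine SIGTRAPs.
 */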
# 461 "./include/linux/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__warn_unused_result__)) int ptrace_report_syscall_entry(
struct pt_regs *regs)
{
return ptrace_report_syscall(1);
}
# 484 "./include/linux/ptrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ptrace_report_syscall_exit(struct pt_regs *regs, int step)
{
if (step)
user_single_step_report(regs);
else
ptrace_report_syscall(2);
}
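/*
 * The literal 1 and 2 passed to ptrace_report_syscall() are
 * PTRACE_EVENTMSG_SYSCALL_ENTRY and PTRACE_EVENTMSG_SYSCALL_EXIT from the
 * ptrace uapi header.
 */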
# 14 "./include/linux/audit.h" 2
# 1 "./include/linux/audit_arch.h" 1
# 12 "./include/linux/audit_arch.h"
enum auditsc_class_t {
AUDITSC_NATIVE = 0,
AUDITSC_COMPAT,
AUDITSC_OPEN,
AUDITSC_OPENAT,
AUDITSC_SOCKETCALL,
AUDITSC_EXECVE,
AUDITSC_OPENAT2,

AUDITSC_NVALS
};
# 15 "./include/linux/audit.h" 2
# 1 "./include/uapi/linux/audit.h" 1
# 328 "./include/uapi/linux/audit.h"
enum {
Audit_equal,
Audit_not_equal,
Audit_bitmask,
Audit_bittest,
Audit_lt,
Audit_gt,
Audit_le,
Audit_ge,
Audit_bad
};
# 455 "./include/uapi/linux/audit.h"
enum audit_nlgrps {
AUDIT_NLGRP_NONE,
AUDIT_NLGRP_READLOG,
__AUDIT_NLGRP_MAX
};


struct audit_status {
__u32 mask;
__u32 enabled;
__u32 failure;
__u32 pid;
__u32 rate_limit;
__u32 backlog_limit;
__u32 lost;
__u32 backlog;
union {
__u32 version;
__u32 feature_bitmap;
};
__u32 backlog_wait_time;
__u32 backlog_wait_time_actual;


};

struct audit_features {

__u32 vers;
__u32 mask;
__u32 features;
__u32 lock;
};
# 496 "./include/uapi/linux/audit.h"
struct audit_tty_status {
__u32 enabled;
__u32 log_passwd;
};
# 508 "./include/uapi/linux/audit.h"
struct audit_rule_data {
__u32 flags;
__u32 action;
__u32 field_count;
__u32 mask[64];
__u32 fields[64];
__u32 values[64];
__u32 fieldflags[64];
__u32 buflen;
char buf[];
};
# 16 "./include/linux/audit.h" 2
# 1 "./include/uapi/linux/netfilter/nf_tables.h" 1
# 22 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_registers {
NFT_REG_VERDICT,
NFT_REG_1,
NFT_REG_2,
NFT_REG_3,
NFT_REG_4,
__NFT_REG_MAX,

NFT_REG32_00 = 8,
NFT_REG32_01,
NFT_REG32_02,
NFT_REG32_03,
NFT_REG32_04,
NFT_REG32_05,
NFT_REG32_06,
NFT_REG32_07,
NFT_REG32_08,
NFT_REG32_09,
NFT_REG32_10,
NFT_REG32_11,
NFT_REG32_12,
NFT_REG32_13,
NFT_REG32_14,
NFT_REG32_15,
};
# 64 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_verdicts {
NFT_CONTINUE = -1,
NFT_BREAK = -2,
NFT_JUMP = -3,
NFT_GOTO = -4,
NFT_RETURN = -5,
};
# 101 "./include/uapi/linux/netfilter/nf_tables.h"
enum nf_tables_msg_types {
NFT_MSG_NEWTABLE,
NFT_MSG_GETTABLE,
NFT_MSG_DELTABLE,
NFT_MSG_NEWCHAIN,
NFT_MSG_GETCHAIN,
NFT_MSG_DELCHAIN,
NFT_MSG_NEWRULE,
NFT_MSG_GETRULE,
NFT_MSG_DELRULE,
NFT_MSG_NEWSET,
NFT_MSG_GETSET,
NFT_MSG_DELSET,
NFT_MSG_NEWSETELEM,
NFT_MSG_GETSETELEM,
NFT_MSG_DELSETELEM,
NFT_MSG_NEWGEN,
NFT_MSG_GETGEN,
NFT_MSG_TRACE,
NFT_MSG_NEWOBJ,
NFT_MSG_GETOBJ,
NFT_MSG_DELOBJ,
NFT_MSG_GETOBJ_RESET,
NFT_MSG_NEWFLOWTABLE,
NFT_MSG_GETFLOWTABLE,
NFT_MSG_DELFLOWTABLE,
NFT_MSG_MAX,
};






enum nft_list_attributes {
NFTA_LIST_UNSPEC,
NFTA_LIST_ELEM,
__NFTA_LIST_MAX
};
# 150 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_hook_attributes {
NFTA_HOOK_UNSPEC,
NFTA_HOOK_HOOKNUM,
NFTA_HOOK_PRIORITY,
NFTA_HOOK_DEV,
NFTA_HOOK_DEVS,
__NFTA_HOOK_MAX
};







enum nft_table_flags {
NFT_TABLE_F_DORMANT = 0x1,
NFT_TABLE_F_OWNER = 0x2,
};
# 181 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_table_attributes {
NFTA_TABLE_UNSPEC,
NFTA_TABLE_NAME,
NFTA_TABLE_FLAGS,
NFTA_TABLE_USE,
NFTA_TABLE_HANDLE,
NFTA_TABLE_PAD,
NFTA_TABLE_USERDATA,
NFTA_TABLE_OWNER,
__NFTA_TABLE_MAX
};


enum nft_chain_flags {
NFT_CHAIN_BASE = (1 << 0),
NFT_CHAIN_HW_OFFLOAD = (1 << 1),
NFT_CHAIN_BINDING = (1 << 2),
};
# 218 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_chain_attributes {
NFTA_CHAIN_UNSPEC,
NFTA_CHAIN_TABLE,
NFTA_CHAIN_HANDLE,
NFTA_CHAIN_NAME,
NFTA_CHAIN_HOOK,
NFTA_CHAIN_POLICY,
NFTA_CHAIN_USE,
NFTA_CHAIN_TYPE,
NFTA_CHAIN_COUNTERS,
NFTA_CHAIN_PAD,
NFTA_CHAIN_FLAGS,
NFTA_CHAIN_ID,
NFTA_CHAIN_USERDATA,
__NFTA_CHAIN_MAX
};
# 249 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_rule_attributes {
NFTA_RULE_UNSPEC,
NFTA_RULE_TABLE,
NFTA_RULE_CHAIN,
NFTA_RULE_HANDLE,
NFTA_RULE_EXPRESSIONS,
NFTA_RULE_COMPAT,
NFTA_RULE_POSITION,
NFTA_RULE_USERDATA,
NFTA_RULE_PAD,
NFTA_RULE_ID,
NFTA_RULE_POSITION_ID,
NFTA_RULE_CHAIN_ID,
__NFTA_RULE_MAX
};







enum nft_rule_compat_flags {
NFT_RULE_COMPAT_F_INV = (1 << 1),
NFT_RULE_COMPAT_F_MASK = NFT_RULE_COMPAT_F_INV,
};







enum nft_rule_compat_attributes {
NFTA_RULE_COMPAT_UNSPEC,
NFTA_RULE_COMPAT_PROTO,
NFTA_RULE_COMPAT_FLAGS,
__NFTA_RULE_COMPAT_MAX
};
# 303 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_set_flags {
NFT_SET_ANONYMOUS = 0x1,
NFT_SET_CONSTANT = 0x2,
NFT_SET_INTERVAL = 0x4,
NFT_SET_MAP = 0x8,
NFT_SET_TIMEOUT = 0x10,
NFT_SET_EVAL = 0x20,
NFT_SET_OBJECT = 0x40,
NFT_SET_CONCAT = 0x80,
NFT_SET_EXPR = 0x100,
};







enum nft_set_policies {
NFT_SET_POL_PERFORMANCE,
NFT_SET_POL_MEMORY,
};







enum nft_set_desc_attributes {
NFTA_SET_DESC_UNSPEC,
NFTA_SET_DESC_SIZE,
NFTA_SET_DESC_CONCAT,
__NFTA_SET_DESC_MAX
};







enum nft_set_field_attributes {
NFTA_SET_FIELD_UNSPEC,
NFTA_SET_FIELD_LEN,
__NFTA_SET_FIELD_MAX
};
# 373 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_set_attributes {
NFTA_SET_UNSPEC,
NFTA_SET_TABLE,
NFTA_SET_NAME,
NFTA_SET_FLAGS,
NFTA_SET_KEY_TYPE,
NFTA_SET_KEY_LEN,
NFTA_SET_DATA_TYPE,
NFTA_SET_DATA_LEN,
NFTA_SET_POLICY,
NFTA_SET_DESC,
NFTA_SET_ID,
NFTA_SET_TIMEOUT,
NFTA_SET_GC_INTERVAL,
NFTA_SET_USERDATA,
NFTA_SET_PAD,
NFTA_SET_OBJ_TYPE,
NFTA_SET_HANDLE,
NFTA_SET_EXPR,
NFTA_SET_EXPRESSIONS,
__NFTA_SET_MAX
};
# 403 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_set_elem_flags {
NFT_SET_ELEM_INTERVAL_END = 0x1,
NFT_SET_ELEM_CATCHALL = 0x2,
};
# 422 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_set_elem_attributes {
NFTA_SET_ELEM_UNSPEC,
NFTA_SET_ELEM_KEY,
NFTA_SET_ELEM_DATA,
NFTA_SET_ELEM_FLAGS,
NFTA_SET_ELEM_TIMEOUT,
NFTA_SET_ELEM_EXPIRATION,
NFTA_SET_ELEM_USERDATA,
NFTA_SET_ELEM_EXPR,
NFTA_SET_ELEM_PAD,
NFTA_SET_ELEM_OBJREF,
NFTA_SET_ELEM_KEY_END,
NFTA_SET_ELEM_EXPRESSIONS,
__NFTA_SET_ELEM_MAX
};
# 447 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_set_elem_list_attributes {
NFTA_SET_ELEM_LIST_UNSPEC,
NFTA_SET_ELEM_LIST_TABLE,
NFTA_SET_ELEM_LIST_SET,
NFTA_SET_ELEM_LIST_ELEMENTS,
NFTA_SET_ELEM_LIST_SET_ID,
__NFTA_SET_ELEM_LIST_MAX
};
# 471 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_data_types {
NFT_DATA_VALUE,
NFT_DATA_VERDICT = 0xffffff00U,
};
# 484 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_data_attributes {
NFTA_DATA_UNSPEC,
NFTA_DATA_VALUE,
NFTA_DATA_VERDICT,
__NFTA_DATA_MAX
};
# 502 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_verdict_attributes {
NFTA_VERDICT_UNSPEC,
NFTA_VERDICT_CODE,
NFTA_VERDICT_CHAIN,
NFTA_VERDICT_CHAIN_ID,
__NFTA_VERDICT_MAX
};
# 517 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_expr_attributes {
NFTA_EXPR_UNSPEC,
NFTA_EXPR_NAME,
NFTA_EXPR_DATA,
__NFTA_EXPR_MAX
};
# 531 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_immediate_attributes {
NFTA_IMMEDIATE_UNSPEC,
NFTA_IMMEDIATE_DREG,
NFTA_IMMEDIATE_DATA,
__NFTA_IMMEDIATE_MAX
};
# 547 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_bitwise_ops {
NFT_BITWISE_BOOL,
NFT_BITWISE_LSHIFT,
NFT_BITWISE_RSHIFT,
};
# 578 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_bitwise_attributes {
NFTA_BITWISE_UNSPEC,
NFTA_BITWISE_SREG,
NFTA_BITWISE_DREG,
NFTA_BITWISE_LEN,
NFTA_BITWISE_MASK,
NFTA_BITWISE_XOR,
NFTA_BITWISE_OP,
NFTA_BITWISE_DATA,
__NFTA_BITWISE_MAX
};
# 597 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_byteorder_ops {
NFT_BYTEORDER_NTOH,
NFT_BYTEORDER_HTON,
};
# 611 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_byteorder_attributes {
NFTA_BYTEORDER_UNSPEC,
NFTA_BYTEORDER_SREG,
NFTA_BYTEORDER_DREG,
NFTA_BYTEORDER_OP,
NFTA_BYTEORDER_LEN,
NFTA_BYTEORDER_SIZE,
__NFTA_BYTEORDER_MAX
};
# 632 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_cmp_ops {
NFT_CMP_EQ,
NFT_CMP_NEQ,
NFT_CMP_LT,
NFT_CMP_LTE,
NFT_CMP_GT,
NFT_CMP_GTE,
};
# 648 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_cmp_attributes {
NFTA_CMP_UNSPEC,
NFTA_CMP_SREG,
NFTA_CMP_OP,
NFTA_CMP_DATA,
__NFTA_CMP_MAX
};
# 663 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_range_ops {
NFT_RANGE_EQ,
NFT_RANGE_NEQ,
};
# 676 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_range_attributes {
NFTA_RANGE_UNSPEC,
NFTA_RANGE_SREG,
NFTA_RANGE_OP,
NFTA_RANGE_FROM_DATA,
NFTA_RANGE_TO_DATA,
__NFTA_RANGE_MAX
};


enum nft_lookup_flags {
NFT_LOOKUP_F_INV = (1 << 0),
};
# 699 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_lookup_attributes {
NFTA_LOOKUP_UNSPEC,
NFTA_LOOKUP_SET,
NFTA_LOOKUP_SREG,
NFTA_LOOKUP_DREG,
NFTA_LOOKUP_SET_ID,
NFTA_LOOKUP_FLAGS,
__NFTA_LOOKUP_MAX
};


enum nft_dynset_ops {
NFT_DYNSET_OP_ADD,
NFT_DYNSET_OP_UPDATE,
NFT_DYNSET_OP_DELETE,
};

enum nft_dynset_flags {
NFT_DYNSET_F_INV = (1 << 0),
NFT_DYNSET_F_EXPR = (1 << 1),
};
# 734 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_dynset_attributes {
NFTA_DYNSET_UNSPEC,
NFTA_DYNSET_SET_NAME,
NFTA_DYNSET_SET_ID,
NFTA_DYNSET_OP,
NFTA_DYNSET_SREG_KEY,
NFTA_DYNSET_SREG_DATA,
NFTA_DYNSET_TIMEOUT,
NFTA_DYNSET_EXPR,
NFTA_DYNSET_PAD,
NFTA_DYNSET_FLAGS,
NFTA_DYNSET_EXPRESSIONS,
__NFTA_DYNSET_MAX,
};
# 758 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_payload_bases {
NFT_PAYLOAD_LL_HEADER,
NFT_PAYLOAD_NETWORK_HEADER,
NFT_PAYLOAD_TRANSPORT_HEADER,
NFT_PAYLOAD_INNER_HEADER,
};
# 772 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_payload_csum_types {
NFT_PAYLOAD_CSUM_NONE,
NFT_PAYLOAD_CSUM_INET,
NFT_PAYLOAD_CSUM_SCTP,
};

enum nft_payload_csum_flags {
NFT_PAYLOAD_L4CSUM_PSEUDOHDR = (1 << 0),
};
# 794 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_payload_attributes {
NFTA_PAYLOAD_UNSPEC,
NFTA_PAYLOAD_DREG,
NFTA_PAYLOAD_BASE,
NFTA_PAYLOAD_OFFSET,
NFTA_PAYLOAD_LEN,
NFTA_PAYLOAD_SREG,
NFTA_PAYLOAD_CSUM_TYPE,
NFTA_PAYLOAD_CSUM_OFFSET,
NFTA_PAYLOAD_CSUM_FLAGS,
__NFTA_PAYLOAD_MAX
};


enum nft_exthdr_flags {
NFT_EXTHDR_F_PRESENT = (1 << 0),
};
# 820 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_exthdr_op {
NFT_EXTHDR_OP_IPV6,
NFT_EXTHDR_OP_TCPOPT,
NFT_EXTHDR_OP_IPV4,
NFT_EXTHDR_OP_SCTP,
__NFT_EXTHDR_OP_MAX
};
# 840 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_exthdr_attributes {
NFTA_EXTHDR_UNSPEC,
NFTA_EXTHDR_DREG,
NFTA_EXTHDR_TYPE,
NFTA_EXTHDR_OFFSET,
NFTA_EXTHDR_LEN,
NFTA_EXTHDR_FLAGS,
NFTA_EXTHDR_OP,
NFTA_EXTHDR_SREG,
__NFTA_EXTHDR_MAX
};
# 892 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_meta_keys {
NFT_META_LEN,
NFT_META_PROTOCOL,
NFT_META_PRIORITY,
NFT_META_MARK,
NFT_META_IIF,
NFT_META_OIF,
NFT_META_IIFNAME,
NFT_META_OIFNAME,
NFT_META_IFTYPE,

NFT_META_OIFTYPE,
NFT_META_SKUID,
NFT_META_SKGID,
NFT_META_NFTRACE,
NFT_META_RTCLASSID,
NFT_META_SECMARK,
NFT_META_NFPROTO,
NFT_META_L4PROTO,
NFT_META_BRI_IIFNAME,
NFT_META_BRI_OIFNAME,
NFT_META_PKTTYPE,
NFT_META_CPU,
NFT_META_IIFGROUP,
NFT_META_OIFGROUP,
NFT_META_CGROUP,
NFT_META_PRANDOM,
NFT_META_SECPATH,
NFT_META_IIFKIND,
NFT_META_OIFKIND,
NFT_META_BRI_IIFPVID,
NFT_META_BRI_IIFVPROTO,
NFT_META_TIME_NS,
NFT_META_TIME_DAY,
NFT_META_TIME_HOUR,
NFT_META_SDIF,
NFT_META_SDIFNAME,
__NFT_META_IIFTYPE,
};
# 941 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_rt_keys {
NFT_RT_CLASSID,
NFT_RT_NEXTHOP4,
NFT_RT_NEXTHOP6,
NFT_RT_TCPMSS,
NFT_RT_XFRM,
__NFT_RT_MAX
};
# 957 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_hash_types {
NFT_HASH_JENKINS,
NFT_HASH_SYM,
};
# 975 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_hash_attributes {
NFTA_HASH_UNSPEC,
NFTA_HASH_SREG,
NFTA_HASH_DREG,
NFTA_HASH_LEN,
NFTA_HASH_MODULUS,
NFTA_HASH_SEED,
NFTA_HASH_OFFSET,
NFTA_HASH_TYPE,
NFTA_HASH_SET_NAME,
NFTA_HASH_SET_ID,
__NFTA_HASH_MAX,
};
# 997 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_meta_attributes {
NFTA_META_UNSPEC,
NFTA_META_DREG,
NFTA_META_KEY,
NFTA_META_SREG,
__NFTA_META_MAX
};
# 1012 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_rt_attributes {
NFTA_RT_UNSPEC,
NFTA_RT_DREG,
NFTA_RT_KEY,
__NFTA_RT_MAX
};
# 1027 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_socket_attributes {
NFTA_SOCKET_UNSPEC,
NFTA_SOCKET_KEY,
NFTA_SOCKET_DREG,
NFTA_SOCKET_LEVEL,
__NFTA_SOCKET_MAX
};
# 1044 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_socket_keys {
NFT_SOCKET_TRANSPARENT,
NFT_SOCKET_MARK,
NFT_SOCKET_WILDCARD,
NFT_SOCKET_CGROUPV2,
__NFT_SOCKET_MAX
};
# 1081 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_ct_keys {
NFT_CT_STATE,
NFT_CT_DIRECTION,
NFT_CT_STATUS,
NFT_CT_MARK,
NFT_CT_SECMARK,
NFT_CT_EXPIRATION,
NFT_CT_HELPER,
NFT_CT_L3PROTOCOL,
NFT_CT_SRC,
NFT_CT_DST,
NFT_CT_PROTOCOL,
NFT_CT_PROTO_SRC,
NFT_CT_PROTO_DST,
NFT_CT_LABELS,
NFT_CT_PKTS,
NFT_CT_BYTES,
NFT_CT_AVGPKT,
NFT_CT_ZONE,
NFT_CT_EVENTMASK,
NFT_CT_SRC_IP,
NFT_CT_DST_IP,
NFT_CT_SRC_IP6,
NFT_CT_DST_IP6,
NFT_CT_ID,
__NFT_CT_MAX
};
# 1118 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_ct_attributes {
NFTA_CT_UNSPEC,
NFTA_CT_DREG,
NFTA_CT_KEY,
NFTA_CT_DIRECTION,
NFTA_CT_SREG,
__NFTA_CT_MAX
};






enum nft_offload_attributes {
NFTA_FLOW_UNSPEC,
NFTA_FLOW_TABLE_NAME,
__NFTA_FLOW_MAX,
};


enum nft_limit_type {
NFT_LIMIT_PKTS,
NFT_LIMIT_PKT_BYTES
};

enum nft_limit_flags {
NFT_LIMIT_F_INV = (1 << 0),
};
# 1157 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_limit_attributes {
NFTA_LIMIT_UNSPEC,
NFTA_LIMIT_RATE,
NFTA_LIMIT_UNIT,
NFTA_LIMIT_BURST,
NFTA_LIMIT_TYPE,
NFTA_LIMIT_FLAGS,
NFTA_LIMIT_PAD,
__NFTA_LIMIT_MAX
};


enum nft_connlimit_flags {
NFT_CONNLIMIT_F_INV = (1 << 0),
};







enum nft_connlimit_attributes {
NFTA_CONNLIMIT_UNSPEC,
NFTA_CONNLIMIT_COUNT,
NFTA_CONNLIMIT_FLAGS,
__NFTA_CONNLIMIT_MAX
};
# 1193 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_counter_attributes {
NFTA_COUNTER_UNSPEC,
NFTA_COUNTER_BYTES,
NFTA_COUNTER_PACKETS,
NFTA_COUNTER_PAD,
__NFTA_COUNTER_MAX
};
# 1208 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_last_attributes {
NFTA_LAST_UNSPEC,
NFTA_LAST_SET,
NFTA_LAST_MSECS,
NFTA_LAST_PAD,
__NFTA_LAST_MAX
};
# 1227 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_log_attributes {
NFTA_LOG_UNSPEC,
NFTA_LOG_GROUP,
NFTA_LOG_PREFIX,
NFTA_LOG_SNAPLEN,
NFTA_LOG_QTHRESHOLD,
NFTA_LOG_LEVEL,
NFTA_LOG_FLAGS,
__NFTA_LOG_MAX
};
# 1252 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_log_level {
NFT_LOGLEVEL_EMERG,
NFT_LOGLEVEL_ALERT,
NFT_LOGLEVEL_CRIT,
NFT_LOGLEVEL_ERR,
NFT_LOGLEVEL_WARNING,
NFT_LOGLEVEL_NOTICE,
NFT_LOGLEVEL_INFO,
NFT_LOGLEVEL_DEBUG,
NFT_LOGLEVEL_AUDIT,
__NFT_LOGLEVEL_MAX
};
# 1274 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_queue_attributes {
NFTA_QUEUE_UNSPEC,
NFTA_QUEUE_NUM,
NFTA_QUEUE_TOTAL,
NFTA_QUEUE_FLAGS,
NFTA_QUEUE_SREG_QNUM,
__NFTA_QUEUE_MAX
};






enum nft_quota_flags {
NFT_QUOTA_F_INV = (1 << 0),
NFT_QUOTA_F_DEPLETED = (1 << 1),
};
# 1300 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_quota_attributes {
NFTA_QUOTA_UNSPEC,
NFTA_QUOTA_BYTES,
NFTA_QUOTA_FLAGS,
NFTA_QUOTA_PAD,
NFTA_QUOTA_CONSUMED,
__NFTA_QUOTA_MAX
};







enum nft_secmark_attributes {
NFTA_SECMARK_UNSPEC,
NFTA_SECMARK_CTX,
__NFTA_SECMARK_MAX,
};
# 1332 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_reject_types {
NFT_REJECT_ICMP_UNREACH,
NFT_REJECT_TCP_RST,
NFT_REJECT_ICMPX_UNREACH,
};
# 1348 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_reject_inet_code {
NFT_REJECT_ICMPX_NO_ROUTE = 0,
NFT_REJECT_ICMPX_PORT_UNREACH,
NFT_REJECT_ICMPX_HOST_UNREACH,
NFT_REJECT_ICMPX_ADMIN_PROHIBITED,
__NFT_REJECT_ICMPX_MAX
};
# 1363 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_reject_attributes {
NFTA_REJECT_UNSPEC,
NFTA_REJECT_TYPE,
NFTA_REJECT_ICMP_CODE,
__NFTA_REJECT_MAX
};
# 1377 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_nat_types {
NFT_NAT_SNAT,
NFT_NAT_DNAT,
};
# 1393 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_nat_attributes {
NFTA_NAT_UNSPEC,
NFTA_NAT_TYPE,
NFTA_NAT_FAMILY,
NFTA_NAT_REG_ADDR_MIN,
NFTA_NAT_REG_ADDR_MAX,
NFTA_NAT_REG_PROTO_MIN,
NFTA_NAT_REG_PROTO_MAX,
NFTA_NAT_FLAGS,
__NFTA_NAT_MAX
};
# 1413 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_tproxy_attributes {
NFTA_TPROXY_UNSPEC,
NFTA_TPROXY_FAMILY,
NFTA_TPROXY_REG_ADDR,
NFTA_TPROXY_REG_PORT,
__NFTA_TPROXY_MAX
};
# 1429 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_masq_attributes {
NFTA_MASQ_UNSPEC,
NFTA_MASQ_FLAGS,
NFTA_MASQ_REG_PROTO_MIN,
NFTA_MASQ_REG_PROTO_MAX,
__NFTA_MASQ_MAX
};
# 1445 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_redir_attributes {
NFTA_REDIR_UNSPEC,
NFTA_REDIR_REG_PROTO_MIN,
NFTA_REDIR_REG_PROTO_MAX,
NFTA_REDIR_FLAGS,
__NFTA_REDIR_MAX
};
# 1460 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_dup_attributes {
NFTA_DUP_UNSPEC,
NFTA_DUP_SREG_ADDR,
NFTA_DUP_SREG_DEV,
__NFTA_DUP_MAX
};
# 1475 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_fwd_attributes {
NFTA_FWD_UNSPEC,
NFTA_FWD_SREG_DEV,
NFTA_FWD_SREG_ADDR,
NFTA_FWD_NFPROTO,
__NFTA_FWD_MAX
};
# 1493 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_objref_attributes {
NFTA_OBJREF_UNSPEC,
NFTA_OBJREF_IMM_TYPE,
NFTA_OBJREF_IMM_NAME,
NFTA_OBJREF_SET_SREG,
NFTA_OBJREF_SET_NAME,
NFTA_OBJREF_SET_ID,
__NFTA_OBJREF_MAX
};







enum nft_gen_attributes {
NFTA_GEN_UNSPEC,
NFTA_GEN_ID,
NFTA_GEN_PROC_PID,
NFTA_GEN_PROC_NAME,
__NFTA_GEN_MAX
};
# 1528 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_fib_attributes {
NFTA_FIB_UNSPEC,
NFTA_FIB_DREG,
NFTA_FIB_RESULT,
NFTA_FIB_FLAGS,
__NFTA_FIB_MAX
};


enum nft_fib_result {
NFT_FIB_RESULT_UNSPEC,
NFT_FIB_RESULT_OIF,
NFT_FIB_RESULT_OIFNAME,
NFT_FIB_RESULT_ADDRTYPE,
__NFT_FIB_RESULT_MAX
};


enum nft_fib_flags {
NFTA_FIB_F_SADDR = 1 << 0,
NFTA_FIB_F_DADDR = 1 << 1,
NFTA_FIB_F_MARK = 1 << 2,
NFTA_FIB_F_IIF = 1 << 3,
NFTA_FIB_F_OIF = 1 << 4,
NFTA_FIB_F_PRESENT = 1 << 5,
};

enum nft_ct_helper_attributes {
NFTA_CT_HELPER_UNSPEC,
NFTA_CT_HELPER_NAME,
NFTA_CT_HELPER_L3PROTO,
NFTA_CT_HELPER_L4PROTO,
__NFTA_CT_HELPER_MAX,
};


enum nft_ct_timeout_timeout_attributes {
NFTA_CT_TIMEOUT_UNSPEC,
NFTA_CT_TIMEOUT_L3PROTO,
NFTA_CT_TIMEOUT_L4PROTO,
NFTA_CT_TIMEOUT_DATA,
__NFTA_CT_TIMEOUT_MAX,
};


enum nft_ct_expectation_attributes {
NFTA_CT_EXPECT_UNSPEC,
NFTA_CT_EXPECT_L3PROTO,
NFTA_CT_EXPECT_L4PROTO,
NFTA_CT_EXPECT_DPORT,
NFTA_CT_EXPECT_TIMEOUT,
NFTA_CT_EXPECT_SIZE,
__NFTA_CT_EXPECT_MAX,
};
# 1609 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_object_attributes {
NFTA_OBJ_UNSPEC,
NFTA_OBJ_TABLE,
NFTA_OBJ_NAME,
NFTA_OBJ_TYPE,
NFTA_OBJ_DATA,
NFTA_OBJ_USE,
NFTA_OBJ_HANDLE,
NFTA_OBJ_PAD,
NFTA_OBJ_USERDATA,
__NFTA_OBJ_MAX
};
# 1629 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_flowtable_flags {
NFT_FLOWTABLE_HW_OFFLOAD = 0x1,
NFT_FLOWTABLE_COUNTER = 0x2,
NFT_FLOWTABLE_MASK = (NFT_FLOWTABLE_HW_OFFLOAD |
NFT_FLOWTABLE_COUNTER)
};
# 1646 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_flowtable_attributes {
NFTA_FLOWTABLE_UNSPEC,
NFTA_FLOWTABLE_TABLE,
NFTA_FLOWTABLE_NAME,
NFTA_FLOWTABLE_HOOK,
NFTA_FLOWTABLE_USE,
NFTA_FLOWTABLE_HANDLE,
NFTA_FLOWTABLE_PAD,
NFTA_FLOWTABLE_FLAGS,
__NFTA_FLOWTABLE_MAX
};
# 1666 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_flowtable_hook_attributes {
NFTA_FLOWTABLE_HOOK_UNSPEC,
NFTA_FLOWTABLE_HOOK_NUM,
NFTA_FLOWTABLE_HOOK_PRIORITY,
NFTA_FLOWTABLE_HOOK_DEVS,
__NFTA_FLOWTABLE_HOOK_MAX
};
# 1682 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_osf_attributes {
NFTA_OSF_UNSPEC,
NFTA_OSF_DREG,
NFTA_OSF_TTL,
NFTA_OSF_FLAGS,
__NFTA_OSF_MAX,
};


enum nft_osf_flags {
NFT_OSF_F_VERSION = (1 << 0),
};
# 1702 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_synproxy_attributes {
NFTA_SYNPROXY_UNSPEC,
NFTA_SYNPROXY_MSS,
NFTA_SYNPROXY_WSCALE,
NFTA_SYNPROXY_FLAGS,
__NFTA_SYNPROXY_MAX,
};







enum nft_devices_attributes {
NFTA_DEVICE_UNSPEC,
NFTA_DEVICE_NAME,
__NFTA_DEVICE_MAX
};
# 1731 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_xfrm_attributes {
NFTA_XFRM_UNSPEC,
NFTA_XFRM_DREG,
NFTA_XFRM_KEY,
NFTA_XFRM_DIR,
NFTA_XFRM_SPNUM,
__NFTA_XFRM_MAX
};


enum nft_xfrm_keys {
NFT_XFRM_KEY_UNSPEC,
NFT_XFRM_KEY_DADDR_IP4,
NFT_XFRM_KEY_DADDR_IP6,
NFT_XFRM_KEY_SADDR_IP4,
NFT_XFRM_KEY_SADDR_IP6,
NFT_XFRM_KEY_REQID,
NFT_XFRM_KEY_SPI,
__NFT_XFRM_KEY_MAX,
};
# 1773 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_trace_attributes {
NFTA_TRACE_UNSPEC,
NFTA_TRACE_TABLE,
NFTA_TRACE_CHAIN,
NFTA_TRACE_RULE_HANDLE,
NFTA_TRACE_TYPE,
NFTA_TRACE_VERDICT,
NFTA_TRACE_ID,
NFTA_TRACE_LL_HEADER,
NFTA_TRACE_NETWORK_HEADER,
NFTA_TRACE_TRANSPORT_HEADER,
NFTA_TRACE_IIF,
NFTA_TRACE_IIFTYPE,
NFTA_TRACE_OIF,
NFTA_TRACE_OIFTYPE,
NFTA_TRACE_MARK,
NFTA_TRACE_NFPROTO,
NFTA_TRACE_POLICY,
NFTA_TRACE_PAD,
__NFTA_TRACE_MAX
};


enum nft_trace_types {
NFT_TRACETYPE_UNSPEC,
NFT_TRACETYPE_POLICY,
NFT_TRACETYPE_RETURN,
NFT_TRACETYPE_RULE,
__NFT_TRACETYPE_MAX
};
# 1815 "./include/uapi/linux/netfilter/nf_tables.h"
enum nft_ng_attributes {
NFTA_NG_UNSPEC,
NFTA_NG_DREG,
NFTA_NG_MODULUS,
NFTA_NG_TYPE,
NFTA_NG_OFFSET,
NFTA_NG_SET_NAME,
NFTA_NG_SET_ID,
__NFTA_NG_MAX
};


enum nft_ng_types {
NFT_NG_INCREMENTAL,
NFT_NG_RANDOM,
__NFT_NG_MAX
};


enum nft_tunnel_key_ip_attributes {
NFTA_TUNNEL_KEY_IP_UNSPEC,
NFTA_TUNNEL_KEY_IP_SRC,
NFTA_TUNNEL_KEY_IP_DST,
__NFTA_TUNNEL_KEY_IP_MAX
};


enum nft_tunnel_ip6_attributes {
NFTA_TUNNEL_KEY_IP6_UNSPEC,
NFTA_TUNNEL_KEY_IP6_SRC,
NFTA_TUNNEL_KEY_IP6_DST,
NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
__NFTA_TUNNEL_KEY_IP6_MAX
};


enum nft_tunnel_opts_attributes {
NFTA_TUNNEL_KEY_OPTS_UNSPEC,
NFTA_TUNNEL_KEY_OPTS_VXLAN,
NFTA_TUNNEL_KEY_OPTS_ERSPAN,
NFTA_TUNNEL_KEY_OPTS_GENEVE,
__NFTA_TUNNEL_KEY_OPTS_MAX
};


enum nft_tunnel_opts_vxlan_attributes {
NFTA_TUNNEL_KEY_VXLAN_UNSPEC,
NFTA_TUNNEL_KEY_VXLAN_GBP,
__NFTA_TUNNEL_KEY_VXLAN_MAX
};


enum nft_tunnel_opts_erspan_attributes {
NFTA_TUNNEL_KEY_ERSPAN_UNSPEC,
NFTA_TUNNEL_KEY_ERSPAN_VERSION,
NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
__NFTA_TUNNEL_KEY_ERSPAN_MAX
};


enum nft_tunnel_opts_geneve_attributes {
NFTA_TUNNEL_KEY_GENEVE_UNSPEC,
NFTA_TUNNEL_KEY_GENEVE_CLASS,
NFTA_TUNNEL_KEY_GENEVE_TYPE,
NFTA_TUNNEL_KEY_GENEVE_DATA,
__NFTA_TUNNEL_KEY_GENEVE_MAX
};


enum nft_tunnel_flags {
NFT_TUNNEL_F_ZERO_CSUM_TX = (1 << 0),
NFT_TUNNEL_F_DONT_FRAGMENT = (1 << 1),
NFT_TUNNEL_F_SEQ_NUMBER = (1 << 2),
};




enum nft_tunnel_key_attributes {
NFTA_TUNNEL_KEY_UNSPEC,
NFTA_TUNNEL_KEY_ID,
NFTA_TUNNEL_KEY_IP,
NFTA_TUNNEL_KEY_IP6,
NFTA_TUNNEL_KEY_FLAGS,
NFTA_TUNNEL_KEY_TOS,
NFTA_TUNNEL_KEY_TTL,
NFTA_TUNNEL_KEY_SPORT,
NFTA_TUNNEL_KEY_DPORT,
NFTA_TUNNEL_KEY_OPTS,
__NFTA_TUNNEL_KEY_MAX
};


enum nft_tunnel_keys {
NFT_TUNNEL_PATH,
NFT_TUNNEL_ID,
__NFT_TUNNEL_MAX
};


enum nft_tunnel_mode {
NFT_TUNNEL_MODE_NONE,
NFT_TUNNEL_MODE_RX,
NFT_TUNNEL_MODE_TX,
__NFT_TUNNEL_MODE_MAX
};


enum nft_tunnel_attributes {
NFTA_TUNNEL_UNSPEC,
NFTA_TUNNEL_KEY,
NFTA_TUNNEL_DREG,
NFTA_TUNNEL_MODE,
__NFTA_TUNNEL_MAX
};
# 17 "./include/linux/audit.h" 2




struct audit_sig_info {
uid_t uid;
pid_t pid;
char ctx[];
};

struct audit_buffer;
struct audit_context;
struct inode;
struct netlink_skb_parms;
struct path;
struct linux_binprm;
struct mq_attr;
struct mqstat;
struct audit_watch;
struct audit_tree;
struct sk_buff;

struct audit_krule {
u32 pflags;
u32 flags;
u32 listnr;
u32 action;
u32 mask[64];
u32 buflen;
u32 field_count;
char *filterkey;
struct audit_field *fields;
struct audit_field *arch_f;
struct audit_field *inode_f;
struct audit_watch *watch;
struct audit_tree *tree;
struct audit_fsnotify_mark *exe;
struct list_head rlist;
struct list_head list;
u64 prio;
};




struct audit_field {
u32 type;
union {
u32 val;
kuid_t uid;
kgid_t gid;
struct {
char *lsm_str;
void *lsm_rule;
};
};
u32 op;
};

enum audit_ntp_type {
AUDIT_NTP_OFFSET,
AUDIT_NTP_FREQ,
AUDIT_NTP_STATUS,
AUDIT_NTP_TAI,
AUDIT_NTP_TICK,
AUDIT_NTP_ADJUST,

AUDIT_NTP_NVALS
};
# 96 "./include/linux/audit.h"
struct audit_ntp_data {};
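/*
 * The empty audit_ntp_data and the long run of empty static inline stubs
 * below suggest both CONFIG_AUDIT and CONFIG_AUDITSYSCALL are disabled in
 * this configuration, so every audit hook in this translation unit is a
 * no-op.
 */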


enum audit_nfcfgop {
AUDIT_XT_OP_REGISTER,
AUDIT_XT_OP_REPLACE,
AUDIT_XT_OP_UNREGISTER,
AUDIT_NFT_OP_TABLE_REGISTER,
AUDIT_NFT_OP_TABLE_UNREGISTER,
AUDIT_NFT_OP_CHAIN_REGISTER,
AUDIT_NFT_OP_CHAIN_UNREGISTER,
AUDIT_NFT_OP_RULE_REGISTER,
AUDIT_NFT_OP_RULE_UNREGISTER,
AUDIT_NFT_OP_SET_REGISTER,
AUDIT_NFT_OP_SET_UNREGISTER,
AUDIT_NFT_OP_SETELEM_REGISTER,
AUDIT_NFT_OP_SETELEM_UNREGISTER,
AUDIT_NFT_OP_GEN_REGISTER,
AUDIT_NFT_OP_OBJ_REGISTER,
AUDIT_NFT_OP_OBJ_UNREGISTER,
AUDIT_NFT_OP_OBJ_RESET,
AUDIT_NFT_OP_FLOWTABLE_REGISTER,
AUDIT_NFT_OP_FLOWTABLE_UNREGISTER,
AUDIT_NFT_OP_INVALID,
};

extern int is_audit_feature_set(int which);

extern int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) audit_register_class(int class, unsigned *list);
extern int audit_classify_syscall(int abi, unsigned syscall);
extern int audit_classify_arch(int arch);

extern unsigned compat_write_class[];
extern unsigned compat_read_class[];
extern unsigned compat_dir_class[];
extern unsigned compat_chattr_class[];
extern unsigned compat_signal_class[];

extern int audit_classify_compat_syscall(int abi, unsigned syscall);
# 150 "./include/linux/audit.h"
struct filename;
# 214 "./include/linux/audit.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__format__(printf, 4, 5)))
void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
const char *fmt, ...)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct audit_buffer *audit_log_start(struct audit_context *ctx,
gfp_t gfp_mask, int type)
{
return ((void *)0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__format__(printf, 2, 3)))
void audit_log_format(struct audit_buffer *ab, const char *fmt, ...)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_log_end(struct audit_buffer *ab)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_log_n_hex(struct audit_buffer *ab,
const unsigned char *buf, size_t len)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_log_n_string(struct audit_buffer *ab,
const char *buf, size_t n)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_log_n_untrustedstring(struct audit_buffer *ab,
const char *string, size_t n)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_log_untrustedstring(struct audit_buffer *ab,
const char *string)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_log_d_path(struct audit_buffer *ab,
const char *prefix,
const struct path *path)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_log_key(struct audit_buffer *ab, char *key)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_log_path_denied(int type, const char *operation)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int audit_log_task_context(struct audit_buffer *ab)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_log_task_info(struct audit_buffer *ab)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) kuid_t audit_get_loginuid(struct task_struct *tsk)
{
return (kuid_t){ -1 };
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int audit_get_sessionid(struct task_struct *tsk)
{
return ((unsigned int)-1);
}
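/*
 * The -1 returns above are the invalid sentinels: (kuid_t){ -1 } is
 * INVALID_UID and (unsigned int)-1 is AUDIT_SID_UNSET.
 */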



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int audit_signal_info(int sig, struct task_struct *t)
{
return 0;
}
# 579 "./include/linux/audit.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int audit_alloc(struct task_struct *task)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int audit_alloc_kernel(struct task_struct *task)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_free(struct task_struct *task)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_uring_entry(u8 op)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_uring_exit(int success, long code)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_syscall_entry(int major, unsigned long a0,
unsigned long a1, unsigned long a2,
unsigned long a3)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_syscall_exit(void *pt_regs)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool audit_dummy_context(void)
{
return true;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_set_context(struct task_struct *task, struct audit_context *ctx)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct audit_context *audit_context(void)
{
return ((void *)0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct filename *audit_reusename(const char *name)
{
return ((void *)0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_getname(struct filename *name)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_inode(struct filename *name,
const struct dentry *dentry,
unsigned int aflags)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_file(struct file *file)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_inode_parent_hidden(struct filename *name,
const struct dentry *dentry)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_inode_child(struct inode *parent,
const struct dentry *dentry,
const unsigned char type)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_core_dumps(long signr)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_seccomp(unsigned long syscall, long signr, int code)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_seccomp_actions_logged(const char *names,
const char *old_names, int res)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_ipc_obj(struct kern_ipc_perm *ipcp)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
gid_t gid, umode_t mode)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_bprm(struct linux_binprm *bprm)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int audit_socketcall(int nargs, unsigned long *args)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int audit_socketcall_compat(int nargs, u32 *args)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_fd_pair(int fd1, int fd2)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int audit_sockaddr(int len, void *addr)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len,
unsigned int msg_prio,
const struct timespec64 *abs_timeout)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_mq_notify(mqd_t mqdes,
const struct sigevent *notification)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int audit_log_bprm_fcaps(struct linux_binprm *bprm,
const struct cred *new,
const struct cred *old)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_log_capset(const struct cred *new,
const struct cred *old)
{ }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_mmap_fd(int fd, int flags)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_openat2_how(struct open_how *how)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_log_kern_module(char *name)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_fanotify(unsigned int response)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_tk_injoffset(struct timespec64 offset)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_ntp_init(struct audit_ntp_data *ad)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_ntp_set_old(struct audit_ntp_data *ad,
enum audit_ntp_type type, long long val)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_ntp_set_new(struct audit_ntp_data *ad,
enum audit_ntp_type type, long long val)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_ntp_log(const struct audit_ntp_data *ad)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_ptrace(struct task_struct *t)
{ }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void audit_log_nfcfg(const char *name, u8 af,
unsigned int nentries,
enum audit_nfcfgop op, gfp_t gfp)
{ }





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool audit_loginuid_set(struct task_struct *tsk)
{
return uid_valid(audit_get_loginuid(tsk));
}
# 16 "./include/net/xfrm.h" 2
# 120 "./include/net/xfrm.h"
struct xfrm_state_walk {
struct list_head all;
u8 state;
u8 dying;
u8 proto;
u32 seq;
struct xfrm_address_filter *filter;
};

struct xfrm_state_offload {
struct net_device *dev;
netdevice_tracker dev_tracker;
struct net_device *real_dev;
unsigned long offload_handle;
unsigned int num_exthdrs;
u8 flags;
};

struct xfrm_mode {
u8 encap;
u8 family;
u8 flags;
};


enum {
XFRM_MODE_FLAG_TUNNEL = 1,
};

enum xfrm_replay_mode {
XFRM_REPLAY_MODE_LEGACY,
XFRM_REPLAY_MODE_BMP,
XFRM_REPLAY_MODE_ESN,
};


struct xfrm_state {
possible_net_t xs_net;
union {
struct hlist_node gclist;
struct hlist_node bydst;
};
struct hlist_node bysrc;
struct hlist_node byspi;
struct hlist_node byseq;

refcount_t refcnt;
spinlock_t lock;

struct xfrm_id id;
struct xfrm_selector sel;
struct xfrm_mark mark;
u32 if_id;
u32 tfcpad;

u32 genid;


struct xfrm_state_walk km;


struct {
u32 reqid;
u8 mode;
u8 replay_window;
u8 aalgo, ealgo, calgo;
u8 flags;
u16 family;
xfrm_address_t saddr;
int header_len;
int trailer_len;
u32 extra_flags;
struct xfrm_mark smark;
} props;

struct xfrm_lifetime_cfg lft;


struct xfrm_algo_auth *aalg;
struct xfrm_algo *ealg;
struct xfrm_algo *calg;
struct xfrm_algo_aead *aead;
const char *geniv;


__be16 new_mapping_sport;
u32 new_mapping;
u32 mapping_maxage;


struct xfrm_encap_tmpl *encap;
struct sock *encap_sk;


xfrm_address_t *coaddr;


struct xfrm_state *tunnel;


atomic_t tunnel_users;


struct xfrm_replay_state replay;
struct xfrm_replay_state_esn *replay_esn;


struct xfrm_replay_state preplay;
struct xfrm_replay_state_esn *preplay_esn;


enum xfrm_replay_mode repl_mode;



u32 xflags;


u32 replay_maxage;
u32 replay_maxdiff;


struct timer_list rtimer;


struct xfrm_stats stats;

struct xfrm_lifetime_cur curlft;
struct hrtimer mtimer;

struct xfrm_state_offload xso;


long saved_tmo;


time64_t lastused;

struct page_frag xfrag;



const struct xfrm_type *type;
struct xfrm_mode inner_mode;
struct xfrm_mode inner_mode_iaf;
struct xfrm_mode outer_mode;

const struct xfrm_type_offload *type_offload;


struct xfrm_sec_ctx *security;



void *data;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net *xs_net(struct xfrm_state *x)
{
return read_pnet(&x->xs_net);
}





enum {
XFRM_STATE_VOID,
XFRM_STATE_ACQ,
XFRM_STATE_VALID,
XFRM_STATE_ERROR,
XFRM_STATE_EXPIRED,
XFRM_STATE_DEAD
};


struct km_event {
union {
u32 hard;
u32 proto;
u32 byid;
u32 aevent;
u32 type;
} data;

u32 seq;
u32 portid;
u32 event;
struct net *net;
};

struct xfrm_if_cb {
struct xfrm_if *(*decode_session)(struct sk_buff *skb,
unsigned short family);
};

void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
void xfrm_if_unregister_cb(void);

struct net_device;
struct xfrm_type;
struct xfrm_dst;
struct xfrm_policy_afinfo {
struct dst_ops *dst_ops;
struct dst_entry *(*dst_lookup)(struct net *net,
int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr,
u32 mark);
int (*get_saddr)(struct net *net, int oif,
xfrm_address_t *saddr,
xfrm_address_t *daddr,
u32 mark);
int (*fill_dst)(struct xfrm_dst *xdst,
struct net_device *dev,
const struct flowi *fl);
struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
};

int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family);
void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
void km_policy_notify(struct xfrm_policy *xp, int dir,
const struct km_event *c);
void km_state_notify(struct xfrm_state *x, const struct km_event *c);

struct xfrm_tmpl;
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
int __xfrm_state_delete(struct xfrm_state *x);

struct xfrm_state_afinfo {
u8 family;
u8 proto;

const struct xfrm_type_offload *type_offload_esp;

const struct xfrm_type *type_esp;
const struct xfrm_type *type_ipip;
const struct xfrm_type *type_ipip6;
const struct xfrm_type *type_comp;
const struct xfrm_type *type_ah;
const struct xfrm_type *type_routing;
const struct xfrm_type *type_dstopts;

int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
int (*transport_finish)(struct sk_buff *skb,
int async);
void (*local_error)(struct sk_buff *skb, u32 mtu);
};

int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family);

struct xfrm_input_afinfo {
u8 family;
bool is_ipip;
int (*callback)(struct sk_buff *skb, u8 protocol,
int err);
};

int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo);
int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);

void xfrm_flush_gc(void);
void xfrm_state_delete_tunnel(struct xfrm_state *x);

struct xfrm_type {
struct module *owner;
u8 proto;
u8 flags;





int (*init_state)(struct xfrm_state *x);
void (*destructor)(struct xfrm_state *);
int (*input)(struct xfrm_state *, struct sk_buff *skb);
int (*output)(struct xfrm_state *, struct sk_buff *pskb);
int (*reject)(struct xfrm_state *, struct sk_buff *,
const struct flowi *);
};

int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);

struct xfrm_type_offload {
struct module *owner;
u8 proto;
void (*encap)(struct xfrm_state *, struct sk_buff *pskb);
int (*input_tail)(struct xfrm_state *x, struct sk_buff *skb);
int (*xmit)(struct xfrm_state *, struct sk_buff *pskb, netdev_features_t features);
};

int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm_af2proto(unsigned int family)
{
switch(family) {
case 2:
return IPPROTO_IPIP;
case 10:
return IPPROTO_IPV6;
default:
return 0;
}
}
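/*
 * The bare 2 and 10 are AF_INET and AF_INET6; the address family selects the
 * matching IP-in-IP protocol for the inner payload.
 */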

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
{
if ((ipproto == IPPROTO_IPIP && x->props.family == 2) ||
(ipproto == IPPROTO_IPV6 && x->props.family == 10))
return &x->inner_mode;
else
return &x->inner_mode_iaf;
}

struct xfrm_tmpl {






struct xfrm_id id;


xfrm_address_t saddr;

unsigned short encap_family;

u32 reqid;


u8 mode;


u8 share;


u8 optional;


u8 allalgs;


u32 aalgos;
u32 ealgos;
u32 calgos;
};




struct xfrm_policy_walk_entry {
struct list_head all;
u8 dead;
};

struct xfrm_policy_walk {
struct xfrm_policy_walk_entry walk;
u8 type;
u32 seq;
};

struct xfrm_policy_queue {
struct sk_buff_head hold_queue;
struct timer_list hold_timer;
unsigned long timeout;
};

struct xfrm_policy {
possible_net_t xp_net;
struct hlist_node bydst;
struct hlist_node byidx;


rwlock_t lock;
refcount_t refcnt;
u32 pos;
struct timer_list timer;

atomic_t genid;
u32 priority;
u32 index;
u32 if_id;
struct xfrm_mark mark;
struct xfrm_selector selector;
struct xfrm_lifetime_cfg lft;
struct xfrm_lifetime_cur curlft;
struct xfrm_policy_walk_entry walk;
struct xfrm_policy_queue polq;
bool bydst_reinsert;
u8 type;
u8 action;
u8 flags;
u8 xfrm_nr;
u16 family;
struct xfrm_sec_ctx *security;
struct xfrm_tmpl xfrm_vec[6];
struct hlist_node bydst_inexact_list;
struct callback_head rcu;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct net *xp_net(const struct xfrm_policy *xp)
{
return read_pnet(&xp->xp_net);
}

struct xfrm_kmaddress {
xfrm_address_t local;
xfrm_address_t remote;
u32 reserved;
u16 family;
};

struct xfrm_migrate {
xfrm_address_t old_daddr;
xfrm_address_t old_saddr;
xfrm_address_t new_daddr;
xfrm_address_t new_saddr;
u8 proto;
u8 mode;
u16 reserved;
u32 reqid;
u16 old_family;
u16 new_family;
};
# 565 "./include/net/xfrm.h"
struct xfrm_mgr {
struct list_head list;
int (*notify)(struct xfrm_state *x, const struct km_event *c);
int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
int (*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
int (*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
int (*migrate)(const struct xfrm_selector *sel,
u8 dir, u8 type,
const struct xfrm_migrate *m,
int num_bundles,
const struct xfrm_kmaddress *k,
const struct xfrm_encap_tmpl *encap);
bool (*is_alive)(const struct km_event *c);
};

int xfrm_register_km(struct xfrm_mgr *km);
int xfrm_unregister_km(struct xfrm_mgr *km);

struct xfrm_tunnel_skb_cb {
union {
struct inet_skb_parm h4;
struct inet6_skb_parm h6;
} header;

union {
struct ip_tunnel *ip4;
struct ip6_tnl *ip6;
} tunnel;
};
# 604 "./include/net/xfrm.h"
struct xfrm_skb_cb {
struct xfrm_tunnel_skb_cb header;


union {
struct {
__u32 low;
__u32 hi;
} output;
struct {
__be32 low;
__be32 hi;
} input;
} seq;
};







struct xfrm_mode_skb_cb {
struct xfrm_tunnel_skb_cb header;


__be16 id;
__be16 frag_off;


u8 ihl;


u8 tos;


u8 ttl;


u8 protocol;


u8 optlen;


u8 flow_lbl[3];
};







struct xfrm_spi_skb_cb {
struct xfrm_tunnel_skb_cb header;

unsigned int daddroff;
unsigned int family;
__be32 seq;
};
# 712 "./include/net/xfrm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
bool task_valid)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
bool task_valid)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_audit_state_add(struct xfrm_state *x, int result,
bool task_valid)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_audit_state_delete(struct xfrm_state *x, int result,
bool task_valid)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
struct sk_buff *skb)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_audit_state_replay(struct xfrm_state *x,
struct sk_buff *skb, __be32 net_seq)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
u16 family)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
__be32 net_spi, __be32 net_seq)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_audit_state_icvfail(struct xfrm_state *x,
struct sk_buff *skb, u8 proto)
{
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_pol_hold(struct xfrm_policy *policy)
{
if (__builtin_expect(!!(policy != ((void *)0)), 1))
refcount_inc(&policy->refcnt);
}

void xfrm_policy_destroy(struct xfrm_policy *policy);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_pol_put(struct xfrm_policy *policy)
{
if (refcount_dec_and_test(&policy->refcnt))
xfrm_policy_destroy(policy);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_pols_put(struct xfrm_policy **pols, int npols)
{
int i;
for (i = npols - 1; i >= 0; --i)
xfrm_pol_put(pols[i]);
}

void __xfrm_state_destroy(struct xfrm_state *, bool);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __xfrm_state_put(struct xfrm_state *x)
{
refcount_dec(&x->refcnt);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_state_put(struct xfrm_state *x)
{
if (refcount_dec_and_test(&x->refcnt))
__xfrm_state_destroy(x, false);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_state_put_sync(struct xfrm_state *x)
{
if (refcount_dec_and_test(&x->refcnt))
__xfrm_state_destroy(x, true);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_state_hold(struct xfrm_state *x)
{
refcount_inc(&x->refcnt);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool addr_match(const void *token1, const void *token2,
unsigned int prefixlen)
{
const __be32 *a1 = token1;
const __be32 *a2 = token2;
unsigned int pdw;
unsigned int pbi;

pdw = prefixlen >> 5;
pbi = prefixlen & 0x1f;

if (pdw)
if (memcmp(a1, a2, pdw << 2))
return false;

if (pbi) {
__be32 mask;

mask = (( __be32)(__builtin_constant_p((__u32)(((0xffffffff) << (32 - pbi)))) ? ((__u32)( (((__u32)(((0xffffffff) << (32 - pbi))) & (__u32)0x000000ffUL) << 24) | (((__u32)(((0xffffffff) << (32 - pbi))) & (__u32)0x0000ff00UL) << 8) | (((__u32)(((0xffffffff) << (32 - pbi))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(((0xffffffff) << (32 - pbi))) & (__u32)0xff000000UL) >> 24))) : __fswab32(((0xffffffff) << (32 - pbi)))));

if ((a1[pdw] ^ a2[pdw]) & mask)
return false;
}

return true;
}
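/*
 * The mask assignment above is the expansion of
 *     mask = htonl((0xffffffff) << (32 - pbi));
 * __builtin_constant_p() folds the byte swap at compile time when it can and
 * calls __fswab32() otherwise. addr_match() therefore compares the leading
 * prefixlen bits of two big-endian addresses, one 32-bit word at a time.
 */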

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
{

if (sizeof(long) == 4 && prefixlen == 0)
return true;
return !((a1 ^ a2) & (( __be32)(__builtin_constant_p((__u32)((~0UL << (32 - prefixlen)))) ? ((__u32)( (((__u32)((~0UL << (32 - prefixlen))) & (__u32)0x000000ffUL) << 24) | (((__u32)((~0UL << (32 - prefixlen))) & (__u32)0x0000ff00UL) << 8) | (((__u32)((~0UL << (32 - prefixlen))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((~0UL << (32 - prefixlen))) & (__u32)0xff000000UL) >> 24))) : __fswab32((~0UL << (32 - prefixlen))))));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
__be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
{
__be16 port;
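/*
 * The (( __be16)(__builtin_constant_p(...) ? ... : __fswab16(...)))
 * expressions below are expansions of htons().  58 and 135 are the
 * expanded values of IPPROTO_ICMPV6 and IPPROTO_MH (plain #defines,
 * unlike the enum-based IPPROTO_TCP and friends); the GRE case takes
 * the upper 16 bits of the key: htons(ntohl(uli->gre_key) >> 16).
 */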
switch(fl->u.__fl_common.flowic_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
case IPPROTO_SCTP:
port = uli->ports.sport;
break;
case IPPROTO_ICMP:
case 58:
port = (( __be16)(__builtin_constant_p((__u16)((uli->icmpt.type))) ? ((__u16)( (((__u16)((uli->icmpt.type)) & (__u16)0x00ffU) << 8) | (((__u16)((uli->icmpt.type)) & (__u16)0xff00U) >> 8))) : __fswab16((uli->icmpt.type))));
break;
case 135:
port = (( __be16)(__builtin_constant_p((__u16)((uli->mht.type))) ? ((__u16)( (((__u16)((uli->mht.type)) & (__u16)0x00ffU) << 8) | (((__u16)((uli->mht.type)) & (__u16)0xff00U) >> 8))) : __fswab16((uli->mht.type))));
break;
case IPPROTO_GRE:
port = (( __be16)(__builtin_constant_p((__u16)(((__builtin_constant_p((__u32)(( __u32)(__be32)(uli->gre_key))) ? ((__u32)( (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(uli->gre_key))) >> 16))) ? ((__u16)( (((__u16)(((__builtin_constant_p((__u32)(( __u32)(__be32)(uli->gre_key))) ? ((__u32)( (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(uli->gre_key))) >> 16)) & (__u16)0x00ffU) << 8) | (((__u16)(((__builtin_constant_p((__u32)(( __u32)(__be32)(uli->gre_key))) ? ((__u32)( (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(uli->gre_key))) >> 16)) & (__u16)0xff00U) >> 8))) : __fswab16(((__builtin_constant_p((__u32)(( __u32)(__be32)(uli->gre_key))) ? ((__u32)( (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(uli->gre_key))) >> 16))));
break;
default:
port = 0;
}
return port;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
__be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
{
__be16 port;
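/* Same pattern as xfrm_flowi_sport() above; the GRE case here is htons(ntohl(uli->gre_key) & 0xffff) */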
switch(fl->u.__fl_common.flowic_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
case IPPROTO_SCTP:
port = uli->ports.dport;
break;
case IPPROTO_ICMP:
case 58:
port = (( __be16)(__builtin_constant_p((__u16)((uli->icmpt.code))) ? ((__u16)( (((__u16)((uli->icmpt.code)) & (__u16)0x00ffU) << 8) | (((__u16)((uli->icmpt.code)) & (__u16)0xff00U) >> 8))) : __fswab16((uli->icmpt.code))));
break;
case IPPROTO_GRE:
port = (( __be16)(__builtin_constant_p((__u16)(((__builtin_constant_p((__u32)(( __u32)(__be32)(uli->gre_key))) ? ((__u32)( (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(uli->gre_key))) & 0xffff))) ? ((__u16)( (((__u16)(((__builtin_constant_p((__u32)(( __u32)(__be32)(uli->gre_key))) ? ((__u32)( (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(uli->gre_key))) & 0xffff)) & (__u16)0x00ffU) << 8) | (((__u16)(((__builtin_constant_p((__u32)(( __u32)(__be32)(uli->gre_key))) ? ((__u32)( (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(uli->gre_key))) & 0xffff)) & (__u16)0xff00U) >> 8))) : __fswab16(((__builtin_constant_p((__u32)(( __u32)(__be32)(uli->gre_key))) ? ((__u32)( (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(uli->gre_key)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(uli->gre_key))) & 0xffff))));
break;
default:
port = 0;
}
return port;
}

bool xfrm_selector_match(const struct xfrm_selector *sel,
const struct flowi *fl, unsigned short family);
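/*
 * Lines such as the one below are preprocessor linemarkers recording the
 * original location of the following tokens; jumps in the line numbers
 * correspond to spans compiled out by #if/#ifdef for this configuration.
 */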
# 905 "./include/net/xfrm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
{
return true;
}
# 922 "./include/net/xfrm.h"
struct xfrm_dst {
union {
struct dst_entry dst;
struct rtable rt;
struct rt6_info rt6;
} u;
struct dst_entry *route;
struct dst_entry *child;
struct dst_entry *path;
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
int num_pols, num_xfrms;
u32 xfrm_genid;
u32 policy_genid;
u32 route_mtu_cached;
u32 child_mtu_cached;
u32 route_cookie;
u32 path_cookie;
};
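/*
 * CONFIG_XFRM is apparently not set for this build: the blank lines in
 * the bodies below are where #ifdef CONFIG_XFRM code was dropped,
 * leaving only the pass-through/NULL fallbacks.
 */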

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
{







return (struct dst_entry *) dst;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
{






return ((void *)0);
}
# 979 "./include/net/xfrm.h"
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);

struct xfrm_if_parms {
int link;
u32 if_id;
};

struct xfrm_if {
struct xfrm_if *next;
struct net_device *dev;
struct net *net;
struct xfrm_if_parms p;

struct gro_cells gro_cells;
};

struct xfrm_offload {

struct {
__u32 low;
__u32 hi;
} seq;

__u32 flags;
# 1013 "./include/net/xfrm.h"
__u32 status;
# 1023 "./include/net/xfrm.h"
__u8 proto;
__u8 inner_ipproto;
};

struct sec_path {
int len;
int olen;

struct xfrm_state *xvec[6];
struct xfrm_offload ovec[1];
};

struct sec_path *secpath_set(struct sk_buff *skb);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
secpath_reset(struct sk_buff *skb)
{



}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
{
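/* Here and below, 2 and 10 are the expanded values of AF_INET and AF_INET6 */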
switch (family) {
case 2:
return addr->a4 == 0;
case 10:
return ipv6_addr_any(&addr->in6);
}
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
__xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
{
return (tmpl->saddr.a4 &&
tmpl->saddr.a4 != x->props.saddr.a4);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
__xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
{
return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
!ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
{
switch (family) {
case 2:
return __xfrm4_state_addr_cmp(tmpl, x);
case 10:
return __xfrm6_state_addr_cmp(tmpl, x);
}
return !0;
}
# 1209 "./include/net/xfrm.h"
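/*
 * The always-succeed stubs below are the !CONFIG_XFRM variants of the
 * policy-check helpers; -38 is -ENOSYS.
 */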
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_sk_free_policy(struct sock *sk) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
return 1;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
return 1;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
{
return 1;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm_decode_session_reverse(struct sk_buff *skb,
struct flowi *fl,
unsigned int family)
{
return -38;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm4_policy_check_reverse(struct sock *sk, int dir,
struct sk_buff *skb)
{
return 1;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm6_policy_check_reverse(struct sock *sk, int dir,
struct sk_buff *skb)
{
return 1;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
{
switch (family){
case 2:
return (xfrm_address_t *)&fl->u.ip4.daddr;
case 10:
return (xfrm_address_t *)&fl->u.ip6.daddr;
}
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
{
switch (family){
case 2:
return (xfrm_address_t *)&fl->u.ip4.saddr;
case 10:
return (xfrm_address_t *)&fl->u.ip6.saddr;
}
return ((void *)0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
void xfrm_flowi_addr_get(const struct flowi *fl,
xfrm_address_t *saddr, xfrm_address_t *daddr,
unsigned short family)
{
switch(family) {
case 2:
memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
break;
case 10:
saddr->in6 = fl->u.ip6.saddr;
daddr->in6 = fl->u.ip6.daddr;
break;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
__xfrm4_state_addr_check(const struct xfrm_state *x,
const xfrm_address_t *daddr, const xfrm_address_t *saddr)
{
if (daddr->a4 == x->id.daddr.a4 &&
(saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
return 1;
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
__xfrm6_state_addr_check(const struct xfrm_state *x,
const xfrm_address_t *daddr, const xfrm_address_t *saddr)
{
if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
(ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) ||
ipv6_addr_any((struct in6_addr *)saddr) ||
ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
return 1;
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
xfrm_state_addr_check(const struct xfrm_state *x,
const xfrm_address_t *daddr, const xfrm_address_t *saddr,
unsigned short family)
{
switch (family) {
case 2:
return __xfrm4_state_addr_check(x, daddr, saddr);
case 10:
return __xfrm6_state_addr_check(x, daddr, saddr);
}
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
unsigned short family)
{
switch (family) {
case 2:
return __xfrm4_state_addr_check(x,
(const xfrm_address_t *)&fl->u.ip4.daddr,
(const xfrm_address_t *)&fl->u.ip4.saddr);
case 10:
return __xfrm6_state_addr_check(x,
(const xfrm_address_t *)&fl->u.ip6.daddr,
(const xfrm_address_t *)&fl->u.ip6.saddr);
}
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm_state_kern(const struct xfrm_state *x)
{
return atomic_read(&x->tunnel_users);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xfrm_id_proto_valid(u8 proto)
{
switch (proto) {
case IPPROTO_AH:
case IPPROTO_ESP:
case IPPROTO_COMP:
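/* 43 == IPPROTO_ROUTING, 60 == IPPROTO_DSTOPTS (IPv6 extension headers) */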

case 43:
case 60:

return true;
default:
return false;
}
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm_id_proto_match(u8 proto, u8 userproto)
{
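/* 255 is the expanded value of IPSEC_PROTO_ANY */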
return (!userproto || proto == userproto ||
(userproto == 255 && (proto == IPPROTO_AH ||
proto == IPPROTO_ESP ||
proto == IPPROTO_COMP)));
}




struct xfrm_algo_aead_info {
char *geniv;
u16 icv_truncbits;
};

struct xfrm_algo_auth_info {
u16 icv_truncbits;
u16 icv_fullbits;
};

struct xfrm_algo_encr_info {
char *geniv;
u16 blockbits;
u16 defkeybits;
};

struct xfrm_algo_comp_info {
u16 threshold;
};

struct xfrm_algo_desc {
char *name;
char *compat;
u8 available:1;
u8 pfkey_supported:1;
union {
struct xfrm_algo_aead_info aead;
struct xfrm_algo_auth_info auth;
struct xfrm_algo_encr_info encr;
struct xfrm_algo_comp_info comp;
} uinfo;
struct sadb_alg desc;
};


struct xfrm4_protocol {
int (*handler)(struct sk_buff *skb);
int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
int encap_type);
int (*cb_handler)(struct sk_buff *skb, int err);
int (*err_handler)(struct sk_buff *skb, u32 info);

struct xfrm4_protocol *next;
int priority;
};

struct xfrm6_protocol {
int (*handler)(struct sk_buff *skb);
int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
int encap_type);
int (*cb_handler)(struct sk_buff *skb, int err);
int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info);

struct xfrm6_protocol *next;
int priority;
};


struct xfrm_tunnel {
int (*handler)(struct sk_buff *skb);
int (*cb_handler)(struct sk_buff *skb, int err);
int (*err_handler)(struct sk_buff *skb, u32 info);

struct xfrm_tunnel *next;
int priority;
};

struct xfrm6_tunnel {
int (*handler)(struct sk_buff *skb);
int (*cb_handler)(struct sk_buff *skb, int err);
int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info);
struct xfrm6_tunnel *next;
int priority;
};

void xfrm_init(void);
void xfrm4_init(void);
int xfrm_state_init(struct net *net);
void xfrm_state_fini(struct net *net);
void xfrm4_state_init(void);
void xfrm4_protocol_init(void);
# 1461 "./include/net/xfrm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm6_init(void)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm6_fini(void)
{
;
}







int xfrm_sysctl_init(struct net *net);

void xfrm_sysctl_fini(struct net *net);






void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
struct xfrm_address_filter *filter);
int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
int (*func)(struct xfrm_state *, int, void*), void *);
void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
struct xfrm_state *xfrm_state_alloc(struct net *net);
void xfrm_state_free(struct xfrm_state *x);
struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
const struct flowi *fl,
struct xfrm_tmpl *tmpl,
struct xfrm_policy *pol, int *err,
unsigned short family, u32 if_id);
struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
xfrm_address_t *daddr,
xfrm_address_t *saddr,
unsigned short family,
u8 mode, u8 proto, u32 reqid);
struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
unsigned short family);
int xfrm_state_check_expire(struct xfrm_state *x);
void xfrm_state_insert(struct xfrm_state *x);
int xfrm_state_add(struct xfrm_state *x);
int xfrm_state_update(struct xfrm_state *x);
struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
const xfrm_address_t *daddr, __be32 spi,
u8 proto, unsigned short family);
struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
u8 proto,
unsigned short family);






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_tmpl_sort(struct xfrm_tmpl **d, struct xfrm_tmpl **s,
int n, unsigned short family)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_state_sort(struct xfrm_state **d, struct xfrm_state **s,
int n, unsigned short family)
{
}


struct xfrmk_sadinfo {
u32 sadhcnt;
u32 sadhmcnt;
u32 sadcnt;
};

struct xfrmk_spdinfo {
u32 incnt;
u32 outcnt;
u32 fwdcnt;
u32 inscnt;
u32 outscnt;
u32 fwdscnt;
u32 spdhcnt;
u32 spdhmcnt;
};

struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
int xfrm_state_delete(struct xfrm_state *x);
int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
int xfrm_init_replay(struct xfrm_state *x);
u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
int xfrm_init_state(struct xfrm_state *x);
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
int (*finish)(struct net *, struct sock *,
struct sk_buff *));
int xfrm_trans_queue(struct sk_buff *skb,
int (*finish)(struct net *, struct sock *,
struct sk_buff *));
int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
int xfrm_output(struct sock *sk, struct sk_buff *skb);





void xfrm_local_error(struct sk_buff *skb, int mtu);
int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
int encap_type);
int xfrm4_transport_finish(struct sk_buff *skb, int async);
int xfrm4_rcv(struct sk_buff *skb);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
{
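/* The casts below are expansions of the XFRM_TUNNEL_SKB_CB() and XFRM_SPI_SKB_CB() skb->cb accessor macros */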
((struct xfrm_tunnel_skb_cb *)&((skb)->cb[0]))->tunnel.ip4 = ((void *)0);
((struct xfrm_spi_skb_cb *)&((skb)->cb[0]))->family = 2;
((struct xfrm_spi_skb_cb *)&((skb)->cb[0]))->daddroff = __builtin_offsetof(struct iphdr, daddr);
return xfrm_input(skb, nexthdr, spi, 0);
}

int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
struct ip6_tnl *t);
int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
int encap_type);
int xfrm6_transport_finish(struct sk_buff *skb, int async);
int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
int xfrm6_rcv(struct sk_buff *skb);
int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
xfrm_address_t *saddr, u8 proto);
void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol);
int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol);
int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
# 1624 "./include/net/xfrm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm_user_policy(struct sock *sk, int optname,
sockptr_t optval, int optlen)
{
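/* -92 == -ENOPROTOOPT */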
return -92;
}


struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr,
int family, u32 mark);

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
int (*func)(struct xfrm_policy *, int, int, void*),
void *);
void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net,
const struct xfrm_mark *mark,
u32 if_id, u8 type, int dir,
struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx, int delete,
int *err);
struct xfrm_policy *xfrm_policy_byid(struct net *net,
const struct xfrm_mark *mark, u32 if_id,
u8 type, int dir, u32 id, int delete,
int *err);
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
void xfrm_policy_hash_rebuild(struct net *net);
u32 xfrm_get_acqseq(void);
int verify_spi_info(u8 proto, u32 min, u32 max);
int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
u8 mode, u32 reqid, u32 if_id, u8 proto,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr, int create,
unsigned short family);
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
# 1682 "./include/net/xfrm.h"
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
xfrm_address_t *addr);

void xfrm_input_init(void);
int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);

void xfrm_probe_algs(void);
int xfrm_count_pfkey_auth_supported(void);
int xfrm_count_pfkey_enc_supported(void);
struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
int probe);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xfrm6_addr_equal(const xfrm_address_t *a,
const xfrm_address_t *b)
{
return ipv6_addr_equal((const struct in6_addr *)a,
(const struct in6_addr *)b);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xfrm_addr_equal(const xfrm_address_t *a,
const xfrm_address_t *b,
sa_family_t family)
{
switch (family) {
default:
case 2:
return (( u32)a->a4 ^ ( u32)b->a4) == 0;
case 10:
return xfrm6_addr_equal(a, b);
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm_policy_id2dir(u32 index)
{
return index & 7;
}
# 1764 "./include/net/xfrm.h"
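/* alg_key_len is in bits; (len + 7) / 8 in the helpers below rounds it up to bytes */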
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int aead_len(struct xfrm_algo_aead *alg)
{
return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int xfrm_alg_len(const struct xfrm_algo *alg)
{
return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
{
return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
{
return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
}
# 1843 "./include/net/xfrm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
{
# 1853 "./include/net/xfrm.h"
return ((void *)0);

}

void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) xfrm_dev_init(void);
# 1914 "./include/net/xfrm.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_dev_resume(struct sk_buff *skb)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_dev_backlog(struct softnet_data *sd)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
return skb;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
{
return 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_dev_state_delete(struct xfrm_state *x)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_dev_state_free(struct xfrm_state *x)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
return false;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_dev_state_advance_esn(struct xfrm_state *x)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xfrm_dst_offload_ok(struct dst_entry *dst)
{
return false;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
{
if (attrs[XFRMA_MARK])
memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
else
m->v = m->m = 0;

return m->v & m->m;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
{
int ret = 0;

if (m->m | m->v)
ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x)
{
struct xfrm_mark *m = &x->props.smark;

return (m->v & m->m) | (mark & ~m->m);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
{
int ret = 0;

if (if_id)
ret = nla_put_u32(skb, XFRMA_IF_ID, if_id);
return ret;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
unsigned int family)
{
bool tunnel = false;

switch(family) {
case 2:
if (((struct xfrm_tunnel_skb_cb *)&((skb)->cb[0]))->tunnel.ip4)
tunnel = true;
break;
case 10:
if (((struct xfrm_tunnel_skb_cb *)&((skb)->cb[0]))->tunnel.ip6)
tunnel = true;
break;
}
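/* -22 == -EINVAL: reject a tunnelled packet on a state whose outer mode is not tunnel mode */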
if (tunnel && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL))
return -22;

return 0;
}

extern const int xfrm_msg_min[((__XFRM_MSG_MAX - 1) + 1 - XFRM_MSG_BASE)];
extern const struct nla_policy xfrma_policy[(__XFRMA_MAX - 1)+1];

struct xfrm_translator {

int (*alloc_compat)(struct sk_buff *skb, const struct nlmsghdr *src);


struct nlmsghdr *(*rcv_msg_compat)(const struct nlmsghdr *nlh,
int maxtype, const struct nla_policy *policy,
struct netlink_ext_ack *extack);


int (*xlate_user_policy_sockptr)(u8 **pdata32, int optlen);

struct module *owner;
};







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct xfrm_translator *xfrm_get_translator(void)
{
return ((void *)0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void xfrm_put_translator(struct xfrm_translator *xtr)
{
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool xfrm6_local_dontfrag(const struct sock *sk)
{
int proto;

if (!sk || sk->__sk_common.skc_family != 10)
return false;

proto = sk->sk_protocol;
if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
return inet6_sk(sk)->dontfrag;

return false;
}
# 57 "net/ipv6/route.c" 2
# 1 "./include/net/netevent.h" 1
# 15 "./include/net/netevent.h"
struct dst_entry;
struct neighbour;

struct netevent_redirect {
struct dst_entry *old;
struct dst_entry *new;
struct neighbour *neigh;
const void *daddr;
};

enum netevent_notif_type {
NETEVENT_NEIGH_UPDATE = 1,
NETEVENT_REDIRECT,
NETEVENT_DELAY_PROBE_TIME_UPDATE,
NETEVENT_IPV4_MPATH_HASH_UPDATE,
NETEVENT_IPV6_MPATH_HASH_UPDATE,
NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE,
};

int register_netevent_notifier(struct notifier_block *nb);
int unregister_netevent_notifier(struct notifier_block *nb);
int call_netevent_notifiers(unsigned long val, void *v);
# 58 "net/ipv6/route.c" 2

# 1 "./include/net/rtnh.h" 1







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int rtnh_ok(const struct rtnexthop *rtnh, int remaining)
{
return remaining >= (int)sizeof(*rtnh) &&
rtnh->rtnh_len >= sizeof(*rtnh) &&
rtnh->rtnh_len <= remaining;
}
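/* (((len) + 4 - 1) & ~(4 - 1)) here and below is the expansion of RTNH_ALIGN(len), with RTNH_ALIGNTO == 4 */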

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct rtnexthop *rtnh_next(const struct rtnexthop *rtnh,
int *remaining)
{
int totlen = (((rtnh->rtnh_len) + 4 - 1) & ~(4 - 1));

*remaining -= totlen;
return (struct rtnexthop *) ((char *) rtnh + totlen);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct nlattr *rtnh_attrs(const struct rtnexthop *rtnh)
{
return (struct nlattr *) ((char *) rtnh + (((sizeof(*rtnh)) + 4 - 1) & ~(4 - 1)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int rtnh_attrlen(const struct rtnexthop *rtnh)
{
return rtnh->rtnh_len - (((sizeof(*rtnh)) + 4 - 1) & ~(4 - 1));
}
# 60 "net/ipv6/route.c" 2





# 1 "./include/linux/btf_ids.h" 1





struct btf_id_set {
u32 cnt;
u32 ids[];
};
# 183 "./include/linux/btf_ids.h"
enum {
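/* Expansion of the BTF_SOCK_TYPE_xxx x-macro */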

BTF_SOCK_TYPE_INET,
BTF_SOCK_TYPE_INET_CONN,
BTF_SOCK_TYPE_INET_REQ,
BTF_SOCK_TYPE_INET_TW,
BTF_SOCK_TYPE_REQ,
BTF_SOCK_TYPE_SOCK,
BTF_SOCK_TYPE_SOCK_COMMON,
BTF_SOCK_TYPE_TCP,
BTF_SOCK_TYPE_TCP_REQ,
BTF_SOCK_TYPE_TCP_TW,
BTF_SOCK_TYPE_TCP6,
BTF_SOCK_TYPE_UDP,
BTF_SOCK_TYPE_UDP6,
BTF_SOCK_TYPE_UNIX,

MAX_BTF_SOCK_TYPE,
};

extern u32 btf_sock_ids[];







enum {

BTF_TRACING_TYPE_TASK,
BTF_TRACING_TYPE_FILE,
BTF_TRACING_TYPE_VMA,

MAX_BTF_TRACING_TYPE,
};

extern u32 btf_tracing_ids[];
# 66 "net/ipv6/route.c" 2





static int ip6_rt_type_to_error(u8 fib6_type);



# 1 "./include/trace/events/fib6.h" 1
# 11 "./include/trace/events/fib6.h"
# 1 "./include/linux/tracepoint.h" 1
# 22 "./include/linux/tracepoint.h"
# 1 "./include/linux/static_call.h" 1
# 135 "./include/linux/static_call.h"
# 1 "./include/linux/cpu.h" 1
# 17 "./include/linux/cpu.h"
# 1 "./include/linux/node.h" 1
# 31 "./include/linux/node.h"
struct node_hmem_attrs {
unsigned int read_bandwidth;
unsigned int write_bandwidth;
unsigned int read_latency;
unsigned int write_latency;
};

enum cache_indexing {
NODE_CACHE_DIRECT_MAP,
NODE_CACHE_INDEXED,
NODE_CACHE_OTHER,
};

enum cache_write_policy {
NODE_CACHE_WRITE_BACK,
NODE_CACHE_WRITE_THROUGH,
NODE_CACHE_WRITE_OTHER,
};
# 59 "./include/linux/node.h"
struct node_cache_attrs {
enum cache_indexing indexing;
enum cache_write_policy write_policy;
u64 size;
u16 line_size;
u8 level;
};






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void node_add_cache(unsigned int nid,
struct node_cache_attrs *cache_attrs)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void node_set_perf_attrs(unsigned int nid,
struct node_hmem_attrs *hmem_attrs,
unsigned access)
{
}


struct node {
struct device dev;
struct list_head access_list;
# 95 "./include/linux/node.h"
};

struct memory_block;
extern struct node *node_devices[];
typedef void (*node_registration_func_t)(struct node *);






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
unsigned long end_pfn,
enum meminit_context context)
{
}


extern void unregister_node(struct node *node);
# 153 "./include/linux/node.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void node_dev_init(void)
{
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __register_one_node(int nid)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int register_one_node(int nid)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int unregister_one_node(int nid)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void register_hugetlbfs_with_node(node_registration_func_t reg,
node_registration_func_t unreg)
{
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool node_is_toptier(int node)
{
return node_state(node, N_CPU);
}
# 18 "./include/linux/cpu.h" 2


# 1 "./include/linux/cpuhotplug.h" 1
# 57 "./include/linux/cpuhotplug.h"
enum cpuhp_state {
CPUHP_INVALID = -1,


CPUHP_OFFLINE = 0,
CPUHP_CREATE_THREADS,
CPUHP_PERF_PREPARE,
CPUHP_PERF_X86_PREPARE,
CPUHP_PERF_X86_AMD_UNCORE_PREP,
CPUHP_PERF_POWER,
CPUHP_PERF_SUPERH,
CPUHP_X86_HPET_DEAD,
CPUHP_X86_APB_DEAD,
CPUHP_X86_MCE_DEAD,
CPUHP_VIRT_NET_DEAD,
CPUHP_SLUB_DEAD,
CPUHP_DEBUG_OBJ_DEAD,
CPUHP_MM_WRITEBACK_DEAD,

CPUHP_MM_DEMOTION_DEAD,
CPUHP_MM_VMSTAT_DEAD,
CPUHP_SOFTIRQ_DEAD,
CPUHP_NET_MVNETA_DEAD,
CPUHP_CPUIDLE_DEAD,
CPUHP_ARM64_FPSIMD_DEAD,
CPUHP_ARM_OMAP_WAKE_DEAD,
CPUHP_IRQ_POLL_DEAD,
CPUHP_BLOCK_SOFTIRQ_DEAD,
CPUHP_BIO_DEAD,
CPUHP_ACPI_CPUDRV_DEAD,
CPUHP_S390_PFAULT_DEAD,
CPUHP_BLK_MQ_DEAD,
CPUHP_FS_BUFF_DEAD,
CPUHP_PRINTK_DEAD,
CPUHP_MM_MEMCQ_DEAD,
CPUHP_XFS_DEAD,
CPUHP_PERCPU_CNT_DEAD,
CPUHP_RADIX_DEAD,
CPUHP_PAGE_ALLOC,
CPUHP_NET_DEV_DEAD,
CPUHP_PCI_XGENE_DEAD,
CPUHP_IOMMU_IOVA_DEAD,
CPUHP_LUSTRE_CFS_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
CPUHP_PADATA_DEAD,
CPUHP_AP_DTPM_CPU_DEAD,
CPUHP_RANDOM_PREPARE,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
CPUHP_PROFILE_PREPARE,
CPUHP_X2APIC_PREPARE,
CPUHP_SMPCFD_PREPARE,
CPUHP_RELAY_PREPARE,
CPUHP_SLAB_PREPARE,
CPUHP_MD_RAID5_PREPARE,
CPUHP_RCUTREE_PREP,
CPUHP_CPUIDLE_COUPLED_PREPARE,
CPUHP_POWERPC_PMAC_PREPARE,
CPUHP_POWERPC_MMU_CTX_PREPARE,
CPUHP_XEN_PREPARE,
CPUHP_XEN_EVTCHN_PREPARE,
CPUHP_ARM_SHMOBILE_SCU_PREPARE,
CPUHP_SH_SH3X_PREPARE,
CPUHP_NET_FLOW_PREPARE,
CPUHP_TOPOLOGY_PREPARE,
CPUHP_NET_IUCV_PREPARE,
CPUHP_ARM_BL_PREPARE,
CPUHP_TRACE_RB_PREPARE,
CPUHP_MM_ZS_PREPARE,
CPUHP_MM_ZSWP_MEM_PREPARE,
CPUHP_MM_ZSWP_POOL_PREPARE,
CPUHP_KVM_PPC_BOOK3S_PREPARE,
CPUHP_ZCOMP_PREPARE,
CPUHP_TIMERS_PREPARE,
CPUHP_MIPS_SOC_PREPARE,
CPUHP_BP_PREPARE_DYN,
CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
CPUHP_BRINGUP_CPU,





CPUHP_AP_IDLE_DEAD,
CPUHP_AP_OFFLINE,
CPUHP_AP_SCHED_STARTING,
CPUHP_AP_RCUTREE_DYING,
CPUHP_AP_CPU_PM_STARTING,
CPUHP_AP_IRQ_GIC_STARTING,
CPUHP_AP_IRQ_HIP04_STARTING,
CPUHP_AP_IRQ_APPLE_AIC_STARTING,
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
CPUHP_AP_IRQ_RISCV_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_MICROCODE_LOADER,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
CPUHP_AP_PERF_X86_CQM_STARTING,
CPUHP_AP_PERF_X86_CSTATE_STARTING,
CPUHP_AP_PERF_XTENSA_STARTING,
CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
CPUHP_AP_ARM_SDEI_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
CPUHP_AP_PERF_ARM_ACPI_STARTING,
CPUHP_AP_PERF_ARM_STARTING,
CPUHP_AP_PERF_RISCV_STARTING,
CPUHP_AP_ARM_L2X0_STARTING,
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
CPUHP_AP_JCORE_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
CPUHP_AP_QCOM_TIMER_STARTING,
CPUHP_AP_TEGRA_TIMER_STARTING,
CPUHP_AP_ARMADA_TIMER_STARTING,
CPUHP_AP_MARCO_TIMER_STARTING,
CPUHP_AP_MIPS_GIC_TIMER_STARTING,
CPUHP_AP_ARC_TIMER_STARTING,
CPUHP_AP_RISCV_TIMER_STARTING,
CPUHP_AP_CLINT_TIMER_STARTING,
CPUHP_AP_CSKY_TIMER_STARTING,
CPUHP_AP_TI_GP_TIMER_STARTING,
CPUHP_AP_HYPERV_TIMER_STARTING,
CPUHP_AP_KVM_STARTING,
CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
CPUHP_AP_KVM_ARM_VGIC_STARTING,
CPUHP_AP_KVM_ARM_TIMER_STARTING,

CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_ARM_XEN_STARTING,
CPUHP_AP_ARM_CORESIGHT_STARTING,
CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING,
CPUHP_AP_SMPCFD_DYING,
CPUHP_AP_X86_TBOOT_DYING,
CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
CPUHP_AP_ONLINE,
CPUHP_TEARDOWN_CPU,


CPUHP_AP_ONLINE_IDLE,
CPUHP_AP_SCHED_WAIT_EMPTY,
CPUHP_AP_SMPBOOT_THREADS,
CPUHP_AP_X86_VDSO_VMA_ONLINE,
CPUHP_AP_IRQ_AFFINITY_ONLINE,
CPUHP_AP_BLK_MQ_ONLINE,
CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
CPUHP_AP_X86_INTEL_EPB_ONLINE,
CPUHP_AP_PERF_ONLINE,
CPUHP_AP_PERF_X86_ONLINE,
CPUHP_AP_PERF_X86_UNCORE_ONLINE,
CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
CPUHP_AP_PERF_X86_RAPL_ONLINE,
CPUHP_AP_PERF_X86_CQM_ONLINE,
CPUHP_AP_PERF_X86_CSTATE_ONLINE,
CPUHP_AP_PERF_X86_IDXD_ONLINE,
CPUHP_AP_PERF_S390_CF_ONLINE,
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
CPUHP_AP_PERF_ARM_CCN_ONLINE,
CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE,
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE,
CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
CPUHP_AP_PERF_CSKY_ONLINE,
CPUHP_AP_WATCHDOG_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RANDOM_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_BASE_CACHEINFO_ONLINE,
CPUHP_AP_ONLINE_DYN,
CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,

CPUHP_AP_MM_DEMOTION_ONLINE,
CPUHP_AP_X86_HPET_ONLINE,
CPUHP_AP_X86_KVM_CLK_ONLINE,
CPUHP_AP_ACTIVE,
CPUHP_ONLINE,
};

int __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu), bool multi_instance);

int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name,
bool invoke,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu),
bool multi_instance);
# 279 "./include/linux/cpuhotplug.h"
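/*
 * Convenience wrappers around __cpuhp_setup_state(): the plain variants
 * pass invoke == true so the startup callback runs on already-online
 * CPUs; the _nocalls variants skip that invocation.
 */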
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpuhp_setup_state(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu))
{
return __cpuhp_setup_state(state, name, true, startup, teardown, false);
}
# 299 "./include/linux/cpuhotplug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu))
{
return __cpuhp_setup_state_cpuslocked(state, name, true, startup,
teardown, false);
}
# 319 "./include/linux/cpuhotplug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpuhp_setup_state_nocalls(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu))
{
return __cpuhp_setup_state(state, name, false, startup, teardown,
false);
}
# 341 "./include/linux/cpuhotplug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu))
{
return __cpuhp_setup_state_cpuslocked(state, name, false, startup,
teardown, false);
}
# 362 "./include/linux/cpuhotplug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpuhp_setup_state_multi(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu,
struct hlist_node *node),
int (*teardown)(unsigned int cpu,
struct hlist_node *node))
{
return __cpuhp_setup_state(state, name, false,
(void *) startup,
(void *) teardown, true);
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
bool invoke);
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
struct hlist_node *node, bool invoke);
# 390 "./include/linux/cpuhotplug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpuhp_state_add_instance(enum cpuhp_state state,
struct hlist_node *node)
{
return __cpuhp_state_add_instance(state, node, true);
}
# 406 "./include/linux/cpuhotplug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
struct hlist_node *node)
{
return __cpuhp_state_add_instance(state, node, false);
}
# 423 "./include/linux/cpuhotplug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
cpuhp_state_add_instance_nocalls_cpuslocked(enum cpuhp_state state,
struct hlist_node *node)
{
return __cpuhp_state_add_instance_cpuslocked(state, node, false);
}

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke);
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke);
# 440 "./include/linux/cpuhotplug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpuhp_remove_state(enum cpuhp_state state)
{
__cpuhp_remove_state(state, true);
}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpuhp_remove_state_nocalls(enum cpuhp_state state)
{
__cpuhp_remove_state(state, false);
}
# 463 "./include/linux/cpuhotplug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpuhp_remove_state_nocalls_cpuslocked(enum cpuhp_state state)
{
__cpuhp_remove_state_cpuslocked(state, false);
}
# 476 "./include/linux/cpuhotplug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpuhp_remove_multi_state(enum cpuhp_state state)
{
__cpuhp_remove_state(state, false);
}

int __cpuhp_state_remove_instance(enum cpuhp_state state,
struct hlist_node *node, bool invoke);
# 493 "./include/linux/cpuhotplug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpuhp_state_remove_instance(enum cpuhp_state state,
struct hlist_node *node)
{
return __cpuhp_state_remove_instance(state, node, true);
}
# 507 "./include/linux/cpuhotplug.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpuhp_state_remove_instance_nocalls(enum cpuhp_state state,
struct hlist_node *node)
{
return __cpuhp_state_remove_instance(state, node, false);
}


void cpuhp_online_idle(enum cpuhp_state state);
# 21 "./include/linux/cpu.h" 2

struct device;
struct device_node;
struct attribute_group;

struct cpu {
int node_id;
int hotpluggable;
struct device dev;
};

extern void boot_cpu_init(void);
extern void boot_cpu_hotplug_init(void);
extern void cpu_init(void);
extern void trap_init(void);

extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
extern bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
int cpu, unsigned int *thread);

extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr);

extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);

extern ssize_t cpu_show_meltdown(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v1(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v2(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_l1tf(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_mds(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
struct device_attribute *attr,
char *buf);
extern ssize_t cpu_show_itlb_multihit(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf);

extern __attribute__((__format__(printf, 4, 5)))
struct device *cpu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,
const char *fmt, ...);

extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
extern ssize_t arch_cpu_release(const char *, size_t);
# 91 "./include/linux/cpu.h"
extern bool cpuhp_tasks_frozen;
int add_cpu(unsigned int cpu);
int cpu_device_up(struct device *dev);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
int bringup_hibernate_cpu(unsigned int sleep_cpu);
void bringup_nonboot_cpus(unsigned int setup_max_cpus);
# 114 "./include/linux/cpu.h"
extern struct bus_type cpu_subsys;

extern int lockdep_is_cpus_held(void);


extern void cpus_write_lock(void);
extern void cpus_write_unlock(void);
extern void cpus_read_lock(void);
extern void cpus_read_unlock(void);
extern int cpus_read_trylock(void);
extern void lockdep_assert_cpus_held(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
void clear_tasks_mm_cpumask(int cpu);
int remove_cpu(unsigned int cpu);
int cpu_device_down(struct device *dev);
extern void smp_shutdown_nonboot_cpus(unsigned int primary_cpu);
# 165 "./include/linux/cpu.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void thaw_secondary_cpus(void) {}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int suspend_disable_secondary_cpus(void) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void suspend_enable_secondary_cpus(void) { }


void __attribute__((__noreturn__)) cpu_startup_entry(enum cpuhp_state state);

void cpu_idle_poll_ctrl(bool enable);




bool cpu_in_idle(unsigned long pc);

void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);

int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
void play_idle_precise(u64 duration_ns, u64 latency_ns);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void play_idle(unsigned long duration_us)
{
play_idle_precise(duration_us * 1000L, ((u64)~0ULL));
}


bool cpu_wait_death(unsigned int cpu, int seconds);
bool cpu_report_death(void);
void cpuhp_report_idle_dead(void);




enum cpuhp_smt_control {
CPU_SMT_ENABLED,
CPU_SMT_DISABLED,
CPU_SMT_FORCE_DISABLED,
CPU_SMT_NOT_SUPPORTED,
CPU_SMT_NOT_IMPLEMENTED,
};
# 220 "./include/linux/cpu.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpu_smt_disable(bool force) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void cpu_smt_check_topology(void) { }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool cpu_smt_possible(void) { return false; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpuhp_smt_enable(void) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }


extern bool cpu_mitigations_off(void);
extern bool cpu_mitigations_auto_nosmt(void);
# 136 "./include/linux/static_call.h" 2
# 282 "./include/linux/static_call.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int static_call_init(void) { return 0; }

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) long __static_call_return0(void)
{
return 0;
}
# 304 "./include/linux/static_call.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __static_call_nop(void) { }
# 328 "./include/linux/static_call.h"
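/*
 * Fallback __static_call_update() for builds without architecture
 * static-call support: the do/while below is the expansion of
 * WRITE_ONCE(key->func, func) (compile-time size check plus a volatile
 * store).
 */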
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_634(void) ; if (!((sizeof(key->func) == sizeof(char) || sizeof(key->func) == sizeof(short) || sizeof(key->func) == sizeof(int) || sizeof(key->func) == sizeof(long)) || sizeof(key->func) == sizeof(long long))) __compiletime_assert_634(); } while (0); do { *(volatile typeof(key->func) *)&(key->func) = (func); } while (0); } while (0);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int static_call_text_reserved(void *start, void *end)
{
return 0;
}
# 23 "./include/linux/tracepoint.h" 2

struct module;
struct tracepoint;
struct notifier_block;

struct trace_eval_map {
const char *system;
const char *eval_string;
unsigned long eval_value;
};



extern struct srcu_struct tracepoint_srcu;

extern int
tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
extern int
tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data,
int prio);
extern int
tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe, void *data,
int prio);
extern int
tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int
tracepoint_probe_register_may_exist(struct tracepoint *tp, void *probe,
void *data)
{
return tracepoint_probe_register_prio_may_exist(tp, probe, data,
10);
}
extern void
for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
void *priv);


struct tp_module {
struct list_head list;
struct module *mod;
};

bool trace_module_has_bad_taint(struct module *mod);
extern int register_tracepoint_module_notifier(struct notifier_block *nb);
extern int unregister_tracepoint_module_notifier(struct notifier_block *nb);
# 91 "./include/linux/tracepoint.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tracepoint_synchronize_unregister(void)
{
synchronize_srcu(&tracepoint_srcu);
synchronize_rcu();
}






extern int syscall_regfunc(void);
extern void syscall_unregfunc(void);
# 125 "./include/linux/tracepoint.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
{
return *p;
}
# 12 "./include/trace/events/fib6.h" 2

extern int __traceiter_fib6_table_lookup(void *__data, const struct net *net, const struct fib6_result *res, struct fib6_table *table, const struct flowi6 *flp); extern struct static_call_key __SCK__tp_func_fib6_table_lookup; extern typeof(__traceiter_fib6_table_lookup) __SCT__tp_func_fib6_table_lookup;; extern struct tracepoint __tracepoint_fib6_table_lookup; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void trace_fib6_table_lookup(const struct net *net, const struct fib6_result *res, struct fib6_table *table, const struct flowi6 *flp) { if (static_key_false(&__tracepoint_fib6_table_lookup.key)) do { int __attribute__((__unused__)) __idx = 0; if (!(cpu_online((((struct thread_info *)get_current())->cpu)))) return; ({ int __ret_warn_on = !!(0 && ((preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4))))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/trace/events/fib6.h"), "i" (87), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (0) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } __traceiter_fib6_table_lookup(((void *)0), net, res, table, flp); if (0) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); if (1 && (cpu_online((((struct thread_info *)get_current())->cpu)))) { rcu_read_lock_sched_notrace(); ({ typeof(*(__tracepoint_fib6_table_lookup.funcs)) *__UNIQUE_ID_rcu635 = (typeof(*(__tracepoint_fib6_table_lookup.funcs)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_636(void) ; if (!((sizeof((__tracepoint_fib6_table_lookup.funcs)) == sizeof(char) || sizeof((__tracepoint_fib6_table_lookup.funcs)) == sizeof(short) || sizeof((__tracepoint_fib6_table_lookup.funcs)) == sizeof(int) || sizeof((__tracepoint_fib6_table_lookup.funcs)) == sizeof(long)) || sizeof((__tracepoint_fib6_table_lookup.funcs)) == sizeof(long long))) __compiletime_assert_636(); } while (0); (*(const volatile typeof( _Generic(((__tracepoint_fib6_table_lookup.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_fib6_table_lookup.funcs)))) *)&((__tracepoint_fib6_table_lookup.funcs))); }); do { } while (0 && (!((0) || rcu_read_lock_sched_held()))); ; ((typeof(*(__tracepoint_fib6_table_lookup.funcs)) *)(__UNIQUE_ID_rcu635)); }); rcu_read_unlock_sched_notrace(); } } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void trace_fib6_table_lookup_rcuidle(const struct net *net, const struct fib6_result *res, struct fib6_table *table, const struct flowi6 *flp) { if (static_key_false(&__tracepoint_fib6_table_lookup.key)) do { int __attribute__((__unused__)) __idx = 0; if (!(cpu_online((((struct 
thread_info *)get_current())->cpu)))) return; ({ int __ret_warn_on = !!(1 && ((preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4))))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/trace/events/fib6.h"), "i" (87), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (1) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } __traceiter_fib6_table_lookup(((void *)0), net, res, table, flp); if (1) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int register_trace_fib6_table_lookup(void (*probe)(void *__data, const struct net *net, const struct fib6_result *res, struct fib6_table *table, const struct flowi6 *flp), void *data) { return tracepoint_probe_register(&__tracepoint_fib6_table_lookup, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int register_trace_prio_fib6_table_lookup(void (*probe)(void *__data, const struct net *net, const struct fib6_result *res, struct fib6_table *table, const struct flowi6 *flp), void *data, int prio) { return tracepoint_probe_register_prio(&__tracepoint_fib6_table_lookup, (void *)probe, data, prio); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int unregister_trace_fib6_table_lookup(void (*probe)(void *__data, const struct net *net, const struct fib6_result *res, struct fib6_table *table, const struct flowi6 *flp), void *data) { return tracepoint_probe_unregister(&__tracepoint_fib6_table_lookup, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void check_trace_callback_type_fib6_table_lookup(void (*cb)(void *__data, const struct net *net, const struct fib6_result *res, struct fib6_table *table, const struct flowi6 *flp)) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool trace_fib6_table_lookup_enabled(void) { return static_key_false(&__tracepoint_fib6_table_lookup.key); };
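/*
 * Editor's note (illustrative sketch): a probe attaches to the tracepoint
 * above through the generated register helper. The probe and function
 * names below are hypothetical.
 */
static void __attribute__((__unused__))
__editor_example_fib6_probe(void *__data, const struct net *net,
			    const struct fib6_result *res,
			    struct fib6_table *table,
			    const struct flowi6 *flp)
{
	/* inspect the lookup result here */
}

static inline __attribute__((__unused__)) int
__editor_example_attach(void)
{
	return register_trace_fib6_table_lookup(__editor_example_fib6_probe,
						((void *)0));
}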
# 92 "./include/trace/events/fib6.h"
# 1 "./include/trace/define_trace.h" 1
# 95 "./include/trace/define_trace.h"
# 1 "./include/trace/events/fib6.h" 1
# 11 "./include/trace/events/fib6.h"
# 1 "./include/linux/tracepoint.h" 1
# 12 "./include/trace/events/fib6.h" 2

static const char __tpstrtab_fib6_table_lookup[] __attribute__((__section__("__tracepoints_strings"))) = "fib6_table_lookup"; extern struct static_call_key __SCK__tp_func_fib6_table_lookup; int __traceiter_fib6_table_lookup(void *__data, const struct net *net, const struct fib6_result *res, struct fib6_table *table, const struct flowi6 *flp); struct tracepoint __tracepoint_fib6_table_lookup __attribute__((__used__)) __attribute__((__section__("__tracepoints"))) = { .name = __tpstrtab_fib6_table_lookup, .key = { .enabled = { (0) } }, .static_call_key = &__SCK__tp_func_fib6_table_lookup, .static_call_tramp = ((void *)0), .iterator = &__traceiter_fib6_table_lookup, .regfunc = ((void *)0), .unregfunc = ((void *)0), .funcs = ((void *)0) }; static tracepoint_ptr_t __tracepoint_ptr_fib6_table_lookup __attribute__((__used__)) __attribute__((__section__("__tracepoints_ptrs"))) = &__tracepoint_fib6_table_lookup; int __traceiter_fib6_table_lookup(void *__data, const struct net *net, const struct fib6_result *res, struct fib6_table *table, const struct flowi6 *flp) { struct tracepoint_func *it_func_ptr; void *it_func; it_func_ptr = ({ typeof((&__tracepoint_fib6_table_lookup)->funcs) __UNIQUE_ID_rcu637 = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_638(void) ; if (!((sizeof((&__tracepoint_fib6_table_lookup)->funcs) == sizeof(char) || sizeof((&__tracepoint_fib6_table_lookup)->funcs) == sizeof(short) || sizeof((&__tracepoint_fib6_table_lookup)->funcs) == sizeof(int) || sizeof((&__tracepoint_fib6_table_lookup)->funcs) == sizeof(long)) || sizeof((&__tracepoint_fib6_table_lookup)->funcs) == sizeof(long long))) __compiletime_assert_638(); } while (0); (*(const volatile typeof( _Generic(((&__tracepoint_fib6_table_lookup)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_fib6_table_lookup)->funcs))) *)&((&__tracepoint_fib6_table_lookup)->funcs)); }); ((typeof(*(&__tracepoint_fib6_table_lookup)->funcs) *)(__UNIQUE_ID_rcu637)); }); if (it_func_ptr) { do { it_func = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_639(void) ; if (!((sizeof((it_func_ptr)->func) == sizeof(char) || sizeof((it_func_ptr)->func) == sizeof(short) || sizeof((it_func_ptr)->func) == sizeof(int) || sizeof((it_func_ptr)->func) == sizeof(long)) || sizeof((it_func_ptr)->func) == sizeof(long long))) __compiletime_assert_639(); } while (0); (*(const volatile typeof( _Generic(((it_func_ptr)->func), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((it_func_ptr)->func))) *)&((it_func_ptr)->func)); }); __data = (it_func_ptr)->data; ((void(*)(void *, const struct net *net, const struct fib6_result *res, struct fib6_table *table, const struct flowi6 *flp))(it_func))(__data, net, res, table, flp); } while ((++it_func_ptr)->func); } return 0; } extern struct static_call_key __SCK__tp_func_fib6_table_lookup; extern typeof(__traceiter_fib6_table_lookup) 
__SCT__tp_func_fib6_table_lookup;; struct static_call_key __SCK__tp_func_fib6_table_lookup = { .func = __traceiter_fib6_table_lookup, };;;
# 92 "./include/trace/events/fib6.h"
# 1 "./include/trace/define_trace.h" 1
# 93 "./include/trace/events/fib6.h" 2
# 96 "./include/trace/define_trace.h" 2






# 1 "./include/trace/trace_events.h" 1
# 21 "./include/trace/trace_events.h"
# 1 "./include/linux/trace_events.h" 1





# 1 "./include/linux/ring_buffer.h" 1








struct trace_buffer;
struct ring_buffer_iter;




struct ring_buffer_event {
u32 type_len:5, time_delta:27;

u32 array[];
};
# 55 "./include/linux/ring_buffer.h"
enum ring_buffer_type {
RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
RINGBUF_TYPE_PADDING,
RINGBUF_TYPE_TIME_EXTEND,
RINGBUF_TYPE_TIME_STAMP,
};

unsigned ring_buffer_event_length(struct ring_buffer_event *event);
void *ring_buffer_event_data(struct ring_buffer_event *event);
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
struct ring_buffer_event *event);
# 81 "./include/linux/ring_buffer.h"
void ring_buffer_discard_commit(struct trace_buffer *buffer,
struct ring_buffer_event *event);




struct trace_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);
# 101 "./include/linux/ring_buffer.h"
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table);




void ring_buffer_free(struct trace_buffer *buffer);

int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);

void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);

struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
unsigned long length);
int ring_buffer_unlock_commit(struct trace_buffer *buffer,
struct ring_buffer_event *event);
int ring_buffer_write(struct trace_buffer *buffer,
unsigned long length, void *data);

void ring_buffer_nest_start(struct trace_buffer *buffer);
void ring_buffer_nest_end(struct trace_buffer *buffer);

struct ring_buffer_event *
ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);
struct ring_buffer_event *
ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);

struct ring_buffer_iter *
ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_prepare_sync(void);
void ring_buffer_read_start(struct ring_buffer_iter *iter);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);

struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);

unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);

void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
void ring_buffer_reset(struct trace_buffer *buffer);


int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
struct trace_buffer *buffer_b, int cpu);
# 162 "./include/linux/ring_buffer.h"
bool ring_buffer_empty(struct trace_buffer *buffer);
bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu);

void ring_buffer_record_disable(struct trace_buffer *buffer);
void ring_buffer_record_enable(struct trace_buffer *buffer);
void ring_buffer_record_off(struct trace_buffer *buffer);
void ring_buffer_record_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);

u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_entries(struct trace_buffer *buffer);
unsigned long ring_buffer_overruns(struct trace_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);

u64 ring_buffer_time_stamp(struct trace_buffer *buffer);
void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
int cpu, u64 *ts);
void ring_buffer_set_clock(struct trace_buffer *buffer,
u64 (*clock)(void));
void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);

size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);

void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data);
int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page,
size_t len, int cpu, int full);

struct trace_seq;

int ring_buffer_print_entry_header(struct trace_seq *s);
int ring_buffer_print_page_header(struct trace_seq *s);

enum ring_buffer_flags {
RB_FL_OVERWRITE = 1 << 0,
};


int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
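/*
 * Editor's note (illustrative sketch, not part of the original
 * attachment): the write side of the API declared above is a
 * reserve/commit pair. The helper below is hypothetical; real callers
 * also guard against preemption and NULL buffers as the tracing core does.
 */
static inline __attribute__((__unused__)) int
__editor_example_rb_write_u64(struct trace_buffer *buffer, u64 val)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, sizeof(val));
	if (!event)
		return -1;	/* buffer full or disabled */
	*(u64 *)ring_buffer_event_data(event) = val;
	return ring_buffer_unlock_commit(buffer, event);
}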
# 7 "./include/linux/trace_events.h" 2
# 1 "./include/linux/trace_seq.h" 1




# 1 "./include/linux/seq_buf.h" 1
# 19 "./include/linux/seq_buf.h"
struct seq_buf {
char *buffer;
size_t size;
size_t len;
loff_t readpos;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void seq_buf_clear(struct seq_buf *s)
{
s->len = 0;
s->readpos = 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
seq_buf_init(struct seq_buf *s, char *buf, unsigned int size)
{
s->buffer = buf;
s->size = size;
seq_buf_clear(s);
}





static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
seq_buf_has_overflowed(struct seq_buf *s)
{
return s->len > s->size;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
seq_buf_set_overflow(struct seq_buf *s)
{
s->len = s->size + 1;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int
seq_buf_buffer_left(struct seq_buf *s)
{
if (seq_buf_has_overflowed(s))
return 0;

return s->size - s->len;
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int seq_buf_used(struct seq_buf *s)
{
return s->len < s->size ? s->len : s->size;
}
# 88 "./include/linux/seq_buf.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void seq_buf_terminate(struct seq_buf *s)
{
if (({ int __ret_warn_on = !!(s->size == 0); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/seq_buf.h"), "i" (90), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }))
return;

if (seq_buf_buffer_left(s))
s->buffer[s->len] = 0;
else
s->buffer[s->size - 1] = 0;
}
# 107 "./include/linux/seq_buf.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) size_t seq_buf_get_buf(struct seq_buf *s, char **bufp)
{
({ int __ret_warn_on = !!(s->len > s->size + 1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/seq_buf.h"), "i" (109), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });

if (s->len < s->size) {
*bufp = s->buffer + s->len;
return s->size - s->len;
}

*bufp = ((void *)0);
return 0;
}
# 129 "./include/linux/seq_buf.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void seq_buf_commit(struct seq_buf *s, int num)
{
if (num < 0) {
seq_buf_set_overflow(s);
} else {

do { if (__builtin_expect(!!(s->len + num > s->size), 0)) do { do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/seq_buf.h"), "i" (135), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ; __builtin_unreachable(); } while (0); } while (0); } while (0);
s->len += num;
}
}

extern __attribute__((__format__(printf, 2, 3)))
int seq_buf_printf(struct seq_buf *s, const char *fmt, ...);
extern __attribute__((__format__(printf, 2, 0)))
int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args);
extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s);
extern int seq_buf_to_user(struct seq_buf *s, char *ubuf,
int cnt);
extern int seq_buf_puts(struct seq_buf *s, const char *str);
extern int seq_buf_putc(struct seq_buf *s, unsigned char c);
extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len);
extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
unsigned int len);
extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc);
extern int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
const void *buf, size_t len, bool ascii);


extern int
seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary);
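/*
 * Editor's note (illustrative sketch): typical seq_buf usage is init,
 * print, then read back the used length. seq_buf_used() clamps at size,
 * and seq_buf_has_overflowed() reports len > size. The helper name is
 * hypothetical.
 */
static inline __attribute__((__unused__)) unsigned int
__editor_example_seq_buf_fill(char *buf, unsigned int size)
{
	struct seq_buf s;

	seq_buf_init(&s, buf, size);
	seq_buf_printf(&s, "cpu=%d", 0);
	return seq_buf_used(&s);
}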
# 6 "./include/linux/trace_seq.h" 2








struct trace_seq {
char buffer[((1UL) << (12))];
struct seq_buf seq;
int full;
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
trace_seq_init(struct trace_seq *s)
{
seq_buf_init(&s->seq, s->buffer, ((1UL) << (12)));
s->full = 0;
}
# 40 "./include/linux/trace_seq.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int trace_seq_used(struct trace_seq *s)
{
return seq_buf_used(&s->seq);
}
# 54 "./include/linux/trace_seq.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) char *
trace_seq_buffer_ptr(struct trace_seq *s)
{
return s->buffer + seq_buf_used(&s->seq);
}
# 67 "./include/linux/trace_seq.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool trace_seq_has_overflowed(struct trace_seq *s)
{
return s->full || seq_buf_has_overflowed(&s->seq);
}





extern __attribute__((__format__(printf, 2, 3)))
void trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern __attribute__((__format__(printf, 2, 0)))
void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args);
extern void
trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
extern int trace_seq_to_user(struct trace_seq *s, char *ubuf,
int cnt);
extern void trace_seq_puts(struct trace_seq *s, const char *str);
extern void trace_seq_putc(struct trace_seq *s, unsigned char c);
extern void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len);
extern void trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
unsigned int len);
extern int trace_seq_path(struct trace_seq *s, const struct path *path);

extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
int nmaskbits);

extern int trace_seq_hex_dump(struct trace_seq *s, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
const void *buf, size_t len, bool ascii);
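/*
 * Editor's note (illustrative sketch): trace_seq wraps a page-sized
 * seq_buf, so the caller owns the 4 KiB storage; it is passed in rather
 * than placed on the stack here, since the build uses
 * -Wframe-larger-than=2048. The helper name is hypothetical.
 */
static inline __attribute__((__unused__)) int
__editor_example_trace_seq_fill(struct trace_seq *s)
{
	trace_seq_init(s);
	trace_seq_printf(s, "pid=%d", 1);
	return trace_seq_used(s);
}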
# 8 "./include/linux/trace_events.h" 2


# 1 "./include/linux/perf_event.h" 1
# 17 "./include/linux/perf_event.h"
# 1 "./include/uapi/linux/perf_event.h" 1
# 29 "./include/uapi/linux/perf_event.h"
enum perf_type_id {
PERF_TYPE_HARDWARE = 0,
PERF_TYPE_SOFTWARE = 1,
PERF_TYPE_TRACEPOINT = 2,
PERF_TYPE_HW_CACHE = 3,
PERF_TYPE_RAW = 4,
PERF_TYPE_BREAKPOINT = 5,

PERF_TYPE_MAX,
};
# 60 "./include/uapi/linux/perf_event.h"
enum perf_hw_id {



PERF_COUNT_HW_CPU_CYCLES = 0,
PERF_COUNT_HW_INSTRUCTIONS = 1,
PERF_COUNT_HW_CACHE_REFERENCES = 2,
PERF_COUNT_HW_CACHE_MISSES = 3,
PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
PERF_COUNT_HW_BRANCH_MISSES = 5,
PERF_COUNT_HW_BUS_CYCLES = 6,
PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
PERF_COUNT_HW_REF_CPU_CYCLES = 9,

PERF_COUNT_HW_MAX,
};
# 85 "./include/uapi/linux/perf_event.h"
enum perf_hw_cache_id {
PERF_COUNT_HW_CACHE_L1D = 0,
PERF_COUNT_HW_CACHE_L1I = 1,
PERF_COUNT_HW_CACHE_LL = 2,
PERF_COUNT_HW_CACHE_DTLB = 3,
PERF_COUNT_HW_CACHE_ITLB = 4,
PERF_COUNT_HW_CACHE_BPU = 5,
PERF_COUNT_HW_CACHE_NODE = 6,

PERF_COUNT_HW_CACHE_MAX,
};

enum perf_hw_cache_op_id {
PERF_COUNT_HW_CACHE_OP_READ = 0,
PERF_COUNT_HW_CACHE_OP_WRITE = 1,
PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,

PERF_COUNT_HW_CACHE_OP_MAX,
};

enum perf_hw_cache_op_result_id {
PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
PERF_COUNT_HW_CACHE_RESULT_MISS = 1,

PERF_COUNT_HW_CACHE_RESULT_MAX,
};







enum perf_sw_ids {
PERF_COUNT_SW_CPU_CLOCK = 0,
PERF_COUNT_SW_TASK_CLOCK = 1,
PERF_COUNT_SW_PAGE_FAULTS = 2,
PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
PERF_COUNT_SW_CPU_MIGRATIONS = 4,
PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
PERF_COUNT_SW_EMULATION_FAULTS = 8,
PERF_COUNT_SW_DUMMY = 9,
PERF_COUNT_SW_BPF_OUTPUT = 10,
PERF_COUNT_SW_CGROUP_SWITCHES = 11,

PERF_COUNT_SW_MAX,
};





enum perf_event_sample_format {
PERF_SAMPLE_IP = 1U << 0,
PERF_SAMPLE_TID = 1U << 1,
PERF_SAMPLE_TIME = 1U << 2,
PERF_SAMPLE_ADDR = 1U << 3,
PERF_SAMPLE_READ = 1U << 4,
PERF_SAMPLE_CALLCHAIN = 1U << 5,
PERF_SAMPLE_ID = 1U << 6,
PERF_SAMPLE_CPU = 1U << 7,
PERF_SAMPLE_PERIOD = 1U << 8,
PERF_SAMPLE_STREAM_ID = 1U << 9,
PERF_SAMPLE_RAW = 1U << 10,
PERF_SAMPLE_BRANCH_STACK = 1U << 11,
PERF_SAMPLE_REGS_USER = 1U << 12,
PERF_SAMPLE_STACK_USER = 1U << 13,
PERF_SAMPLE_WEIGHT = 1U << 14,
PERF_SAMPLE_DATA_SRC = 1U << 15,
PERF_SAMPLE_IDENTIFIER = 1U << 16,
PERF_SAMPLE_TRANSACTION = 1U << 17,
PERF_SAMPLE_REGS_INTR = 1U << 18,
PERF_SAMPLE_PHYS_ADDR = 1U << 19,
PERF_SAMPLE_AUX = 1U << 20,
PERF_SAMPLE_CGROUP = 1U << 21,
PERF_SAMPLE_DATA_PAGE_SIZE = 1U << 22,
PERF_SAMPLE_CODE_PAGE_SIZE = 1U << 23,
PERF_SAMPLE_WEIGHT_STRUCT = 1U << 24,

PERF_SAMPLE_MAX = 1U << 25,

__PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63,
};
# 182 "./include/uapi/linux/perf_event.h"
enum perf_branch_sample_type_shift {
PERF_SAMPLE_BRANCH_USER_SHIFT = 0,
PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1,
PERF_SAMPLE_BRANCH_HV_SHIFT = 2,

PERF_SAMPLE_BRANCH_ANY_SHIFT = 3,
PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4,
PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5,
PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6,
PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7,
PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8,
PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9,
PERF_SAMPLE_BRANCH_COND_SHIFT = 10,

PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11,
PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12,
PERF_SAMPLE_BRANCH_CALL_SHIFT = 13,

PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14,
PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15,

PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16,

PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17,

PERF_SAMPLE_BRANCH_MAX_SHIFT
};

enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,

PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,

PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,

PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,

PERF_SAMPLE_BRANCH_TYPE_SAVE =
1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,

PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,

PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};




enum {
PERF_BR_UNKNOWN = 0,
PERF_BR_COND = 1,
PERF_BR_UNCOND = 2,
PERF_BR_IND = 3,
PERF_BR_CALL = 4,
PERF_BR_IND_CALL = 5,
PERF_BR_RET = 6,
PERF_BR_SYSCALL = 7,
PERF_BR_SYSRET = 8,
PERF_BR_COND_CALL = 9,
PERF_BR_COND_RET = 10,
PERF_BR_ERET = 11,
PERF_BR_IRQ = 12,
PERF_BR_MAX,
};
# 267 "./include/uapi/linux/perf_event.h"
enum perf_sample_regs_abi {
PERF_SAMPLE_REGS_ABI_NONE = 0,
PERF_SAMPLE_REGS_ABI_32 = 1,
PERF_SAMPLE_REGS_ABI_64 = 2,
};





enum {
PERF_TXN_ELISION = (1 << 0),
PERF_TXN_TRANSACTION = (1 << 1),
PERF_TXN_SYNC = (1 << 2),
PERF_TXN_ASYNC = (1 << 3),
PERF_TXN_RETRY = (1 << 4),
PERF_TXN_CONFLICT = (1 << 5),
PERF_TXN_CAPACITY_WRITE = (1 << 6),
PERF_TXN_CAPACITY_READ = (1 << 7),

PERF_TXN_MAX = (1 << 8),



PERF_TXN_ABORT_MASK = (0xffffffffULL << 32),
PERF_TXN_ABORT_SHIFT = 32,
};
# 315 "./include/uapi/linux/perf_event.h"
enum perf_event_read_format {
PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
PERF_FORMAT_ID = 1U << 2,
PERF_FORMAT_GROUP = 1U << 3,

PERF_FORMAT_MAX = 1U << 4,
};
# 340 "./include/uapi/linux/perf_event.h"
struct perf_event_attr {




__u32 type;




__u32 size;




__u64 config;

union {
__u64 sample_period;
__u64 sample_freq;
};

__u64 sample_type;
__u64 read_format;

__u64 disabled : 1,
inherit : 1,
pinned : 1,
exclusive : 1,
exclude_user : 1,
exclude_kernel : 1,
exclude_hv : 1,
exclude_idle : 1,
mmap : 1,
comm : 1,
freq : 1,
inherit_stat : 1,
enable_on_exec : 1,
task : 1,
watermark : 1,
# 390 "./include/uapi/linux/perf_event.h"
precise_ip : 2,
mmap_data : 1,
sample_id_all : 1,

exclude_host : 1,
exclude_guest : 1,

exclude_callchain_kernel : 1,
exclude_callchain_user : 1,
mmap2 : 1,
comm_exec : 1,
use_clockid : 1,
context_switch : 1,
write_backward : 1,
namespaces : 1,
ksymbol : 1,
bpf_event : 1,
aux_output : 1,
cgroup : 1,
text_poke : 1,
build_id : 1,
inherit_thread : 1,
remove_on_exec : 1,
sigtrap : 1,
__reserved_1 : 26;

union {
__u32 wakeup_events;
__u32 wakeup_watermark;
};

__u32 bp_type;
union {
__u64 bp_addr;
__u64 kprobe_func;
__u64 uprobe_path;
__u64 config1;
};
union {
__u64 bp_len;
__u64 kprobe_addr;
__u64 probe_offset;
__u64 config2;
};
__u64 branch_sample_type;





__u64 sample_regs_user;




__u32 sample_stack_user;

__s32 clockid;
# 456 "./include/uapi/linux/perf_event.h"
__u64 sample_regs_intr;




__u32 aux_watermark;
__u16 sample_max_stack;
__u16 __reserved_2;
__u32 aux_sample_size;
__u32 __reserved_3;







__u64 sig_data;
};
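/*
 * Editor's note (illustrative sketch): a minimal perf_event_attr setup
 * for a hardware instruction counter, assuming *attr arrives zeroed.
 * The helper name is hypothetical.
 */
static inline __attribute__((__unused__)) void
__editor_example_attr_init(struct perf_event_attr *attr)
{
	attr->type = PERF_TYPE_HARDWARE;
	attr->size = sizeof(*attr);
	attr->config = PERF_COUNT_HW_INSTRUCTIONS;
	attr->disabled = 1;		/* enable explicitly later */
	attr->exclude_kernel = 1;	/* count user mode only */
}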






struct perf_event_query_bpf {



__u32 ids_len;




__u32 prog_cnt;



__u32 ids[0];
};
# 513 "./include/uapi/linux/perf_event.h"
enum perf_event_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
};




struct perf_event_mmap_page {
__u32 version;
__u32 compat_version;
# 559 "./include/uapi/linux/perf_event.h"
__u32 lock;
__u32 index;
__s64 offset;
__u64 time_enabled;
__u64 time_running;
union {
__u64 capabilities;
struct {
__u64 cap_bit0 : 1,
cap_bit0_is_deprecated : 1,

cap_user_rdpmc : 1,
cap_user_time : 1,
cap_user_time_zero : 1,
cap_user_time_short : 1,
cap_____res : 58;
};
};
# 587 "./include/uapi/linux/perf_event.h"
__u16 pmc_width;
# 613 "./include/uapi/linux/perf_event.h"
__u16 time_shift;
__u32 time_mult;
__u64 time_offset;
# 632 "./include/uapi/linux/perf_event.h"
__u64 time_zero;

__u32 size;
__u32 __reserved_1;
# 648 "./include/uapi/linux/perf_event.h"
__u64 time_cycles;
__u64 time_mask;





__u8 __reserved[116*8];
# 673 "./include/uapi/linux/perf_event.h"
__u64 data_head;
__u64 data_tail;
__u64 data_offset;
__u64 data_size;
# 689 "./include/uapi/linux/perf_event.h"
__u64 aux_head;
__u64 aux_tail;
__u64 aux_offset;
__u64 aux_size;
};
# 764 "./include/uapi/linux/perf_event.h"
struct perf_event_header {
__u32 type;
__u16 misc;
__u16 size;
};

struct perf_ns_link_info {
__u64 dev;
__u64 ino;
};

enum {
NET_NS_INDEX = 0,
UTS_NS_INDEX = 1,
IPC_NS_INDEX = 2,
PID_NS_INDEX = 3,
USER_NS_INDEX = 4,
MNT_NS_INDEX = 5,
CGROUP_NS_INDEX = 6,

NR_NAMESPACES,
};

enum perf_event_type {
# 828 "./include/uapi/linux/perf_event.h"
PERF_RECORD_MMAP = 1,
# 838 "./include/uapi/linux/perf_event.h"
PERF_RECORD_LOST = 2,
# 849 "./include/uapi/linux/perf_event.h"
PERF_RECORD_COMM = 3,
# 860 "./include/uapi/linux/perf_event.h"
PERF_RECORD_EXIT = 4,
# 871 "./include/uapi/linux/perf_event.h"
PERF_RECORD_THROTTLE = 5,
PERF_RECORD_UNTHROTTLE = 6,
# 883 "./include/uapi/linux/perf_event.h"
PERF_RECORD_FORK = 7,
# 894 "./include/uapi/linux/perf_event.h"
PERF_RECORD_READ = 8,
# 976 "./include/uapi/linux/perf_event.h"
PERF_RECORD_SAMPLE = 9,
# 1008 "./include/uapi/linux/perf_event.h"
PERF_RECORD_MMAP2 = 10,
# 1022 "./include/uapi/linux/perf_event.h"
PERF_RECORD_AUX = 11,
# 1034 "./include/uapi/linux/perf_event.h"
PERF_RECORD_ITRACE_START = 12,
# 1046 "./include/uapi/linux/perf_event.h"
PERF_RECORD_LOST_SAMPLES = 13,
# 1058 "./include/uapi/linux/perf_event.h"
PERF_RECORD_SWITCH = 14,
# 1072 "./include/uapi/linux/perf_event.h"
PERF_RECORD_SWITCH_CPU_WIDE = 15,
# 1084 "./include/uapi/linux/perf_event.h"
PERF_RECORD_NAMESPACES = 16,
# 1099 "./include/uapi/linux/perf_event.h"
PERF_RECORD_KSYMBOL = 17,
# 1118 "./include/uapi/linux/perf_event.h"
PERF_RECORD_BPF_EVENT = 18,
# 1128 "./include/uapi/linux/perf_event.h"
PERF_RECORD_CGROUP = 19,
# 1146 "./include/uapi/linux/perf_event.h"
PERF_RECORD_TEXT_POKE = 20,
# 1161 "./include/uapi/linux/perf_event.h"
PERF_RECORD_AUX_OUTPUT_HW_ID = 21,

PERF_RECORD_MAX,
};

enum perf_record_ksymbol_type {
PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0,
PERF_RECORD_KSYMBOL_TYPE_BPF = 1,




PERF_RECORD_KSYMBOL_TYPE_OOL = 2,
PERF_RECORD_KSYMBOL_TYPE_MAX
};



enum perf_bpf_event_type {
PERF_BPF_EVENT_UNKNOWN = 0,
PERF_BPF_EVENT_PROG_LOAD = 1,
PERF_BPF_EVENT_PROG_UNLOAD = 2,
PERF_BPF_EVENT_MAX,
};




enum perf_callchain_context {
PERF_CONTEXT_HV = (__u64)-32,
PERF_CONTEXT_KERNEL = (__u64)-128,
PERF_CONTEXT_USER = (__u64)-512,

PERF_CONTEXT_GUEST = (__u64)-2048,
PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
PERF_CONTEXT_GUEST_USER = (__u64)-2560,

PERF_CONTEXT_MAX = (__u64)-4095,
};
# 1220 "./include/uapi/linux/perf_event.h"
union perf_mem_data_src {
__u64 val;
struct {
__u64 mem_op:5,
mem_lvl:14,
mem_snoop:5,
mem_lock:2,
mem_dtlb:7,
mem_lvl_num:4,
mem_remote:1,
mem_snoopx:2,
mem_blk:3,
mem_hops:3,
mem_rsvd:18;
};
};
# 1364 "./include/uapi/linux/perf_event.h"
struct perf_branch_entry {
__u64 from;
__u64 to;
__u64 mispred:1,
predicted:1,
in_tx:1,
abort:1,
cycles:16,
type:4,
reserved:40;
};

union perf_sample_weight {
__u64 full;

struct {
__u32 var1_dw;
__u16 var2_w;
__u16 var3_w;
};
# 1393 "./include/uapi/linux/perf_event.h"
};
# 18 "./include/linux/perf_event.h" 2
# 1 "./include/uapi/linux/bpf_perf_event.h" 1
# 11 "./include/uapi/linux/bpf_perf_event.h"
# 1 "./arch/riscv/include/uapi/asm/bpf_perf_event.h" 1






typedef struct user_regs_struct bpf_user_pt_regs_t;
# 12 "./include/uapi/linux/bpf_perf_event.h" 2

struct bpf_perf_event_data {
bpf_user_pt_regs_t regs;
__u64 sample_period;
__u64 addr;
};
# 19 "./include/linux/perf_event.h" 2






# 1 "./arch/riscv/include/asm/perf_event.h" 1
# 11 "./arch/riscv/include/asm/perf_event.h"
# 1 "./include/linux/perf_event.h" 1
# 12 "./arch/riscv/include/asm/perf_event.h" 2
# 26 "./include/linux/perf_event.h" 2
# 1 "./arch/riscv/include/generated/asm/local64.h" 1
# 27 "./include/linux/perf_event.h" 2





struct perf_guest_info_callbacks {
unsigned int (*state)(void);
unsigned long (*get_ip)(void);
unsigned int (*handle_intel_pt_intr)(void);
};
# 51 "./include/linux/perf_event.h"
# 1 "./include/linux/ftrace.h" 1
# 10 "./include/linux/ftrace.h"
# 1 "./include/linux/trace_recursion.h" 1
# 22 "./include/linux/trace_recursion.h"
enum {

TRACE_FTRACE_BIT,
TRACE_FTRACE_NMI_BIT,
TRACE_FTRACE_IRQ_BIT,
TRACE_FTRACE_SIRQ_BIT,
TRACE_FTRACE_TRANSITION_BIT,


TRACE_INTERNAL_BIT,
TRACE_INTERNAL_NMI_BIT,
TRACE_INTERNAL_IRQ_BIT,
TRACE_INTERNAL_SIRQ_BIT,
TRACE_INTERNAL_TRANSITION_BIT,

TRACE_BRANCH_BIT,







TRACE_IRQ_BIT,


TRACE_GRAPH_BIT,
# 66 "./include/linux/trace_recursion.h"
TRACE_GRAPH_DEPTH_START_BIT,
TRACE_GRAPH_DEPTH_END_BIT,






TRACE_GRAPH_NOTRACE_BIT,


TRACE_RECORD_RECURSION_BIT,
};
# 109 "./include/linux/trace_recursion.h"
enum {
TRACE_CTX_NMI,
TRACE_CTX_IRQ,
TRACE_CTX_SOFTIRQ,
TRACE_CTX_NORMAL,
TRACE_CTX_TRANSITION,
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int trace_get_context_bit(void)
{
unsigned char bit = interrupt_context_level();

return TRACE_CTX_NORMAL - bit;
}


extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
# 141 "./include/linux/trace_recursion.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
int start)
{
unsigned int val = ({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_642(void) ; if (!((sizeof(get_current()->trace_recursion) == sizeof(char) || sizeof(get_current()->trace_recursion) == sizeof(short) || sizeof(get_current()->trace_recursion) == sizeof(int) || sizeof(get_current()->trace_recursion) == sizeof(long)) || sizeof(get_current()->trace_recursion) == sizeof(long long))) __compiletime_assert_642(); } while (0); (*(const volatile typeof( _Generic((get_current()->trace_recursion), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (get_current()->trace_recursion))) *)&(get_current()->trace_recursion)); });
int bit;

bit = trace_get_context_bit() + start;
if (__builtin_expect(!!(val & (1 << bit)), 0)) {
# 157 "./include/linux/trace_recursion.h"
bit = TRACE_CTX_TRANSITION + start;
if (val & (1 << bit)) {
do { if (!((get_current())->trace_recursion & (1<<(TRACE_RECORD_RECURSION_BIT)))) { do { (get_current())->trace_recursion |= (1<<(TRACE_RECORD_RECURSION_BIT)); } while (0); ftrace_record_recursion(ip, pip); do { (get_current())->trace_recursion &= ~(1<<(TRACE_RECORD_RECURSION_BIT)); } while (0); } } while (0);
return -1;
}
}

val |= 1 << bit;
get_current()->trace_recursion = val;
__asm__ __volatile__("": : :"memory");

do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0);

return bit;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void trace_clear_recursion(int bit)
{
do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0);
__asm__ __volatile__("": : :"memory");
do { (get_current())->trace_recursion &= ~(1<<(bit)); } while (0);
}
# 192 "./include/linux/trace_recursion.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) int ftrace_test_recursion_trylock(unsigned long ip,
unsigned long parent_ip)
{
return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_BIT);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void ftrace_test_recursion_unlock(int bit)
{
trace_clear_recursion(bit);
}
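/*
 * Editor's note (illustrative sketch): callers pair the trylock and
 * unlock above around the traced work; a negative bit means recursion
 * was detected and the call must bail out. The helper name is
 * hypothetical.
 */
static inline __attribute__((__unused__)) void
__editor_example_recursion_guard(unsigned long ip, unsigned long parent_ip)
{
	int bit = ftrace_test_recursion_trylock(ip, parent_ip);

	if (bit < 0)
		return;
	/* traced work would run here */
	ftrace_test_recursion_unlock(bit);
}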
# 11 "./include/linux/ftrace.h" 2
# 1 "./include/linux/trace_clock.h" 1
# 16 "./include/linux/trace_clock.h"
# 1 "./arch/riscv/include/generated/asm/trace_clock.h" 1
# 1 "./include/asm-generic/trace_clock.h" 1
# 2 "./arch/riscv/include/generated/asm/trace_clock.h" 2
# 17 "./include/linux/trace_clock.h" 2

extern u64 __attribute__((patchable_function_entry(0, 0))) trace_clock_local(void);
extern u64 __attribute__((patchable_function_entry(0, 0))) trace_clock(void);
extern u64 __attribute__((patchable_function_entry(0, 0))) trace_clock_jiffies(void);
extern u64 __attribute__((patchable_function_entry(0, 0))) trace_clock_global(void);
extern u64 __attribute__((patchable_function_entry(0, 0))) trace_clock_counter(void);
# 12 "./include/linux/ftrace.h" 2
# 23 "./include/linux/ftrace.h"
# 1 "./arch/riscv/include/asm/ftrace.h" 1
# 28 "./arch/riscv/include/asm/ftrace.h"
void _mcount(void);
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
}

struct dyn_arch_ftrace {
};
# 81 "./arch/riscv/include/asm/ftrace.h"
struct dyn_ftrace;
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
# 24 "./include/linux/ftrace.h" 2
# 35 "./include/linux/ftrace.h"
extern void ftrace_boot_snapshot(void);





struct ftrace_ops;
struct ftrace_regs;
# 56 "./include/linux/ftrace.h"
void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs);





void trace_init(void);
void early_trace_init(void);





struct module;
struct ftrace_hash;
struct ftrace_direct_func;



const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym);
# 89 "./include/linux/ftrace.h"
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
char *type, char *name,
char *module_name, int *exported);
# 103 "./include/linux/ftrace.h"
extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);



struct ftrace_regs {
struct pt_regs regs;
};
# 124 "./include/linux/ftrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
{
if (!fregs)
return ((void *)0);

return (&(fregs)->regs);
}

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
# 193 "./include/linux/ftrace.h"
enum {
FTRACE_OPS_FL_ENABLED = ((((1UL))) << (0)),
FTRACE_OPS_FL_DYNAMIC = ((((1UL))) << (1)),
FTRACE_OPS_FL_SAVE_REGS = ((((1UL))) << (2)),
FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = ((((1UL))) << (3)),
FTRACE_OPS_FL_RECURSION = ((((1UL))) << (4)),
FTRACE_OPS_FL_STUB = ((((1UL))) << (5)),
FTRACE_OPS_FL_INITIALIZED = ((((1UL))) << (6)),
FTRACE_OPS_FL_DELETED = ((((1UL))) << (7)),
FTRACE_OPS_FL_ADDING = ((((1UL))) << (8)),
FTRACE_OPS_FL_REMOVING = ((((1UL))) << (9)),
FTRACE_OPS_FL_MODIFYING = ((((1UL))) << (10)),
FTRACE_OPS_FL_ALLOC_TRAMP = ((((1UL))) << (11)),
FTRACE_OPS_FL_IPMODIFY = ((((1UL))) << (12)),
FTRACE_OPS_FL_PID = ((((1UL))) << (13)),
FTRACE_OPS_FL_RCU = ((((1UL))) << (14)),
FTRACE_OPS_FL_TRACE_ARRAY = ((((1UL))) << (15)),
FTRACE_OPS_FL_PERMANENT = ((((1UL))) << (16)),
FTRACE_OPS_FL_DIRECT = ((((1UL))) << (17)),
};



struct ftrace_ops_hash {
struct ftrace_hash *notrace_hash;
struct ftrace_hash *filter_hash;
struct mutex regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
# 243 "./include/linux/ftrace.h"
struct ftrace_ops {
ftrace_func_t func;
struct ftrace_ops *next;
unsigned long flags;
void *private;
ftrace_func_t saved_func;

struct ftrace_ops_hash local_hash;
struct ftrace_ops_hash *func_hash;
struct ftrace_ops_hash old_hash;
unsigned long trampoline;
unsigned long trampoline_size;
struct list_head list;

};

extern struct ftrace_ops *ftrace_ops_list;
extern struct ftrace_ops ftrace_list_end;
# 285 "./include/linux/ftrace.h"
enum ftrace_tracing_type_t {
FTRACE_TYPE_ENTER = 0,
FTRACE_TYPE_RETURN,
};


extern enum ftrace_tracing_type_t ftrace_tracing_type;
# 300 "./include/linux/ftrace.h"
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);

extern void ftrace_stub(unsigned long a0, unsigned long a1,
struct ftrace_ops *op, struct ftrace_regs *fregs);
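/*
 * Editor's note (illustrative sketch): a tracer fills in a struct
 * ftrace_ops and hands it to register_ftrace_function(). The callback
 * and ops names below are hypothetical; real users typically also set
 * flags such as FTRACE_OPS_FL_RECURSION.
 */
static void
__editor_example_ftrace_cb(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* called on every traced function entry */
}

static struct ftrace_ops __editor_example_ops __attribute__((__unused__)) = {
	.func = __editor_example_ftrace_cb,
};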
# 318 "./include/linux/ftrace.h"
struct ftrace_func_entry {
struct hlist_node hlist;
unsigned long ip;
unsigned long direct;
};

struct dyn_ftrace;
# 342 "./include/linux/ftrace.h"
struct ftrace_ops;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int register_ftrace_direct(unsigned long ip, unsigned long addr)
{
return -524;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
{
return -524;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int modify_ftrace_direct(unsigned long ip,
unsigned long old_addr, unsigned long new_addr)
{
return -524;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
{
return ((void *)0);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
struct dyn_ftrace *rec,
unsigned long old_addr,
unsigned long new_addr)
{
return -19;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long ftrace_find_rec_direct(unsigned long ip)
{
return 0;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
{
return -19;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
{
return -19;
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
{
return -19;
}
# 400 "./include/linux/ftrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void arch_ftrace_set_direct_caller(struct pt_regs *regs,
unsigned long addr) { }




extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);


extern __attribute__((section(".data..percpu" ""))) __typeof__(int) disable_stack_tracer;
# 425 "./include/linux/ftrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void stack_tracer_disable(void)
{

if (0)
({ int __ret_warn_on = !!(!preempt_count() || !({ unsigned long _flags; do { ({ unsigned long __dummy; typeof(_flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _flags = arch_local_save_flags(); } while (0); ({ ({ unsigned long __dummy; typeof(_flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(_flags); }); })); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/ftrace.h"), "i" (429), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
do { do { const void *__vpp_verify = (typeof((&(disable_stack_tracer)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(disable_stack_tracer)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(disable_stack_tracer)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(disable_stack_tracer))) *)(&(disable_stack_tracer))); (typeof((typeof(*(&(disable_stack_tracer))) *)(&(disable_stack_tracer)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(disable_stack_tracer)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(disable_stack_tracer))) *)(&(disable_stack_tracer))); (typeof((typeof(*(&(disable_stack_tracer))) *)(&(disable_stack_tracer)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(disable_stack_tracer)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(disable_stack_tracer))) *)(&(disable_stack_tracer))); (typeof((typeof(*(&(disable_stack_tracer))) *)(&(disable_stack_tracer)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(disable_stack_tracer)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(disable_stack_tracer))) *)(&(disable_stack_tracer))); (typeof((typeof(*(&(disable_stack_tracer))) *)(&(disable_stack_tracer)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}







static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void stack_tracer_enable(void)
{
if (0)
({ int __ret_warn_on = !!(!preempt_count() || !({ unsigned long _flags; do { ({ unsigned long __dummy; typeof(_flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _flags = arch_local_save_flags(); } while (0); ({ ({ unsigned long __dummy; typeof(_flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(_flags); }); })); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("include/linux/ftrace.h"), "i" (442), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
do { do { const void *__vpp_verify = (typeof((&(disable_stack_tracer)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(disable_stack_tracer)) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(disable_stack_tracer)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(disable_stack_tracer))) *)(&(disable_stack_tracer))); (typeof((typeof(*(&(disable_stack_tracer))) *)(&(disable_stack_tracer)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(disable_stack_tracer))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(disable_stack_tracer)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(disable_stack_tracer))) *)(&(disable_stack_tracer))); (typeof((typeof(*(&(disable_stack_tracer))) *)(&(disable_stack_tracer)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(disable_stack_tracer))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(disable_stack_tracer)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(disable_stack_tracer))) *)(&(disable_stack_tracer))); (typeof((typeof(*(&(disable_stack_tracer))) *)(&(disable_stack_tracer)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(disable_stack_tracer))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&(disable_stack_tracer)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&(disable_stack_tracer))) *)(&(disable_stack_tracer))); (typeof((typeof(*(&(disable_stack_tracer))) *)(&(disable_stack_tracer)))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += -(typeof(disable_stack_tracer))(1); } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}







int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

enum ftrace_bug_type {
FTRACE_BUG_UNKNOWN,
FTRACE_BUG_INIT,
FTRACE_BUG_NOP,
FTRACE_BUG_CALL,
FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;





extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);

bool is_ftrace_trampoline(unsigned long addr);
# 500 "./include/linux/ftrace.h"
enum {
FTRACE_FL_ENABLED = (1UL << 31),
FTRACE_FL_REGS = (1UL << 30),
FTRACE_FL_REGS_EN = (1UL << 29),
FTRACE_FL_TRAMP = (1UL << 28),
FTRACE_FL_TRAMP_EN = (1UL << 27),
FTRACE_FL_IPMODIFY = (1UL << 26),
FTRACE_FL_DISABLED = (1UL << 25),
FTRACE_FL_DIRECT = (1UL << 24),
FTRACE_FL_DIRECT_EN = (1UL << 23),
};






struct dyn_ftrace {
unsigned long ip;
unsigned long flags;
struct dyn_arch_ftrace arch;
};

int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
int remove, int reset);
int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
unsigned int cnt, int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
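/*
 * Illustrative sketch, not part of the original translation unit: narrowing a
 * dynamic-ftrace ops to a single instruction address with the filter API
 * declared above. The example_* names are hypothetical;
 * ftrace_free_filter(&example_filter_ops) would release the hash when done.
 */
static struct ftrace_ops example_filter_ops;

static int example_trace_one_ip(unsigned long ip)
{
	/* remove=0 adds the ip; reset=1 drops any previously set filter first */
	return ftrace_set_filter_ip(&example_filter_ops, ip, 0, 1);
}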

enum {
FTRACE_UPDATE_CALLS = (1 << 0),
FTRACE_DISABLE_CALLS = (1 << 1),
FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
FTRACE_START_FUNC_RET = (1 << 3),
FTRACE_STOP_FUNC_RET = (1 << 4),
FTRACE_MAY_SLEEP = (1 << 5),
};
# 556 "./include/linux/ftrace.h"
enum {
FTRACE_UPDATE_IGNORE,
FTRACE_UPDATE_MAKE_CALL,
FTRACE_UPDATE_MODIFY_CALL,
FTRACE_UPDATE_MAKE_NOP,
};

enum {
FTRACE_ITER_FILTER = (1 << 0),
FTRACE_ITER_NOTRACE = (1 << 1),
FTRACE_ITER_PRINTALL = (1 << 2),
FTRACE_ITER_DO_PROBES = (1 << 3),
FTRACE_ITER_PROBE = (1 << 4),
FTRACE_ITER_MOD = (1 << 5),
FTRACE_ITER_ENABLED = (1 << 6),
};

void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);







int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char *ubuf,
size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char *ubuf,
size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi")))
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);


extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);
# 651 "./include/linux/ftrace.h"
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
# 680 "./include/linux/ftrace.h"
extern int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr);
# 748 "./include/linux/ftrace.h"
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
# 772 "./include/linux/ftrace.h"
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr);
# 784 "./include/linux/ftrace.h"
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
# 846 "./include/linux/ftrace.h"
void ftrace_kill(void);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tracer_disable(void)
{

ftrace_enabled = 0;

}






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int __ftrace_enabled_save(void)
{

int saved_ftrace_enabled = ftrace_enabled;
ftrace_enabled = 0;
return saved_ftrace_enabled;



}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void __ftrace_enabled_restore(int enabled)
{

ftrace_enabled = enabled;

}
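/*
 * Usage sketch grounded in the two inlines above (the helper name is
 * hypothetical): save the global ftrace_enabled state, run a section
 * untraced, then restore whatever was there before.
 */
static void example_run_untraced(void (*fn)(void))
{
	int saved = __ftrace_enabled_save();	/* disables and remembers */

	fn();
	__ftrace_enabled_restore(saved);	/* puts the old value back */
}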
# 900 "./include/linux/ftrace.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long get_lock_parent_ip(void)
{
unsigned long addr = ((unsigned long)__builtin_return_address(0));

if (!in_lock_functions(addr))
return addr;
addr = ((unsigned long)__builtin_return_address(1));
if (!in_lock_functions(addr))
return addr;
return ((unsigned long)__builtin_return_address(2));
}
# 925 "./include/linux/ftrace.h"
extern void ftrace_init(void);
# 940 "./include/linux/ftrace.h"
struct ftrace_graph_ent {
unsigned long func;
int depth;
} __attribute__((__packed__));






struct ftrace_graph_ret {
unsigned long func;
int depth;

unsigned int overrun;
unsigned long long calltime;
unsigned long long rettime;
} __attribute__((__packed__));


typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *);
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *);

extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);



struct fgraph_ops {
trace_func_graph_ent_t entryfunc;
trace_func_graph_ret_t retfunc;
};






struct ftrace_ret_stack {
unsigned long ret;
unsigned long func;
unsigned long long calltime;

unsigned long long subtime;


unsigned long fp;


unsigned long *retp;

};






extern void return_to_handler(void);

extern int
function_graph_enter(unsigned long ret, unsigned long func,
unsigned long frame_pointer, unsigned long *retp);

struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
unsigned long ret, unsigned long *retp);
# 1019 "./include/linux/ftrace.h"
extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);
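/*
 * Illustrative sketch of the fgraph_ops interface declared above; all
 * example_* names are hypothetical. A nonzero return from entryfunc asks the
 * graph tracer to trace this call, zero skips it.
 */
static int example_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;
}

static void example_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime is the traced function's duration */
}

static struct fgraph_ops example_fgraph_ops = {
	.entryfunc = example_graph_entry,
	.retfunc   = example_graph_return,
};

static int example_start_graph_tracer(void)
{
	/* paired with unregister_ftrace_graph(&example_fgraph_ops) to stop */
	return register_ftrace_graph(&example_fgraph_ops);
}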
# 1029 "./include/linux/ftrace.h"
extern struct static_key_false kill_ftrace_graph;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool ftrace_graph_is_dead(void)
{
/* preprocessed form of static_branch_unlikely(&kill_ftrace_graph) */
return __builtin_expect(!!(({
	if (!__builtin_types_compatible_p(typeof(*&(&kill_ftrace_graph)->key), struct static_key) &&
	    !__builtin_types_compatible_p(typeof(*&(&kill_ftrace_graph)->key), struct static_key_true) &&
	    !__builtin_types_compatible_p(typeof(*&(&kill_ftrace_graph)->key), struct static_key_false))
		____wrong_branch_error();
	static_key_count((struct static_key *)&(&kill_ftrace_graph)->key) > 0;
})), 0);
}

extern void ftrace_graph_stop(void);


extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void pause_graph_tracing(void)
{
atomic_inc(&get_current()->tracing_graph_pause);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void unpause_graph_tracing(void)
{
atomic_dec(&get_current()->tracing_graph_pause);
}
# 1081 "./include/linux/ftrace.h"
enum {
TSK_TRACE_FL_TRACE_BIT = 0,
TSK_TRACE_FL_GRAPH_BIT = 1,
};
enum {
TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_tsk_trace_trace(struct task_struct *tsk)
{
set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_tsk_trace_trace(struct task_struct *tsk)
{
clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int test_tsk_trace_trace(struct task_struct *tsk)
{
return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void set_tsk_trace_graph(struct task_struct *tsk)
{
set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void clear_tsk_trace_graph(struct task_struct *tsk)
{
clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int test_tsk_trace_graph(struct task_struct *tsk)
{
return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);







unsigned long arch_syscall_addr(int nr);
# 52 "./include/linux/perf_event.h" 2

# 1 "./include/linux/irq_work.h" 1
# 17 "./include/linux/irq_work.h"
struct irq_work {
struct __call_single_node node;
void (*func)(struct irq_work *);
struct rcuwait irqwait;
};
# 36 "./include/linux/irq_work.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0)))
void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
{
*work = (struct irq_work){ .node = { .u_flags = (0), }, .func = (func), .irqwait = { .task = ((void *)0), }, };
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irq_work_is_pending(struct irq_work *work)
{
return atomic_read(&work->node.a_flags) & IRQ_WORK_PENDING;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irq_work_is_busy(struct irq_work *work)
{
return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool irq_work_is_hard(struct irq_work *work)
{
return atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ;
}

bool irq_work_queue(struct irq_work *work);
bool irq_work_queue_on(struct irq_work *work, int cpu);

void irq_work_tick(void);
void irq_work_sync(struct irq_work *work);
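/*
 * Illustrative sketch of the irq_work API above (example_* names are
 * hypothetical): the callback is queued from a context that cannot sleep,
 * for instance NMI or hard IRQ, and runs later from irq_work context.
 */
static void example_irq_work_fn(struct irq_work *work)
{
	/* deferred side of the work */
}

static struct irq_work example_irq_work;

static void example_raise_from_atomic(void)
{
	init_irq_work(&example_irq_work, example_irq_work_fn);
	if (!irq_work_queue(&example_irq_work)) {
		/* false means this work item was already pending */
	}
}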



# 1 "./arch/riscv/include/asm/irq_work.h" 1




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool arch_irq_work_has_interrupt(void)
{
return true;
}
extern void arch_irq_work_raise(void);
# 65 "./include/linux/irq_work.h" 2

void irq_work_run(void);
bool irq_work_needs_cpu(void);
void irq_work_single(void *arg);
# 54 "./include/linux/perf_event.h" 2
# 1 "./include/linux/static_key.h" 1
# 55 "./include/linux/perf_event.h" 2



# 1 "./include/linux/perf_regs.h" 1




# 1 "./include/linux/sched/task_stack.h" 1
# 10 "./include/linux/sched/task_stack.h"
# 1 "./include/uapi/linux/magic.h" 1
# 11 "./include/linux/sched/task_stack.h" 2








static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *task_stack_page(const struct task_struct *task)
{
return task->stack;
}



static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long *end_of_stack(const struct task_struct *task)
{



return task->stack;

}
# 66 "./include/linux/sched/task_stack.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void *try_get_task_stack(struct task_struct *tsk)
{
return refcount_inc_not_zero(&tsk->stack_refcount) ?
task_stack_page(tsk) : ((void *)0);
}

extern void put_task_stack(struct task_struct *tsk);
# 82 "./include/linux/sched/task_stack.h"
void exit_task_stack_account(struct task_struct *tsk);




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int object_is_on_stack(const void *obj)
{
void *stack = task_stack_page(get_current());

return (obj >= stack) && (obj < (stack + (((1UL) << (12)) << (2 + 0))));
}

extern void thread_stack_cache_init(void);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long stack_not_used(struct task_struct *p)
{
unsigned long *n = end_of_stack(p);

do {



n++;

} while (!*n);




return (unsigned long)n - (unsigned long)end_of_stack(p);

}

extern void set_task_stack_end_magic(struct task_struct *tsk);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int kstack_end(void *addr)
{



return !(((unsigned long)addr+sizeof(void*)-1) & ((((1UL) << (12)) << (2 + 0))-sizeof(void*)));
}
# 6 "./include/linux/perf_regs.h" 2

struct perf_regs {
__u64 abi;
struct pt_regs *regs;
};



# 1 "./arch/riscv/include/uapi/asm/perf_regs.h" 1






enum perf_event_riscv_regs {
PERF_REG_RISCV_PC,
PERF_REG_RISCV_RA,
PERF_REG_RISCV_SP,
PERF_REG_RISCV_GP,
PERF_REG_RISCV_TP,
PERF_REG_RISCV_T0,
PERF_REG_RISCV_T1,
PERF_REG_RISCV_T2,
PERF_REG_RISCV_S0,
PERF_REG_RISCV_S1,
PERF_REG_RISCV_A0,
PERF_REG_RISCV_A1,
PERF_REG_RISCV_A2,
PERF_REG_RISCV_A3,
PERF_REG_RISCV_A4,
PERF_REG_RISCV_A5,
PERF_REG_RISCV_A6,
PERF_REG_RISCV_A7,
PERF_REG_RISCV_S2,
PERF_REG_RISCV_S3,
PERF_REG_RISCV_S4,
PERF_REG_RISCV_S5,
PERF_REG_RISCV_S6,
PERF_REG_RISCV_S7,
PERF_REG_RISCV_S8,
PERF_REG_RISCV_S9,
PERF_REG_RISCV_S10,
PERF_REG_RISCV_S11,
PERF_REG_RISCV_T3,
PERF_REG_RISCV_T4,
PERF_REG_RISCV_T5,
PERF_REG_RISCV_T6,
PERF_REG_RISCV_MAX,
};
# 14 "./include/linux/perf_regs.h" 2





u64 perf_reg_value(struct pt_regs *regs, int idx);
int perf_reg_validate(u64 mask);
u64 perf_reg_abi(struct task_struct *task);
void perf_get_regs_user(struct perf_regs *regs_user,
struct pt_regs *regs);
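/*
 * Sketch tying the riscv register enum above to the accessor just declared
 * (the helper name is hypothetical): perf_reg_value() maps the enum index
 * onto the pt_regs saved for a sample, here reading the t2 scratch register.
 */
static u64 example_sample_t2(struct pt_regs *regs)
{
	return perf_reg_value(regs, PERF_REG_RISCV_T2);
}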
# 59 "./include/linux/perf_event.h" 2




# 1 "./arch/riscv/include/generated/asm/local.h" 1
# 64 "./include/linux/perf_event.h" 2

struct perf_callchain_entry {
__u64 nr;
__u64 ip[];
};

struct perf_callchain_entry_ctx {
struct perf_callchain_entry *entry;
u32 max_stack;
u32 nr;
short contexts;
bool contexts_maxed;
};

typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
unsigned long off, unsigned long len);

struct perf_raw_frag {
union {
struct perf_raw_frag *next;
unsigned long pad;
};
perf_copy_f copy;
void *data;
u32 size;
} __attribute__((__packed__));

struct perf_raw_record {
struct perf_raw_frag frag;
u32 size;
};
# 116 "./include/linux/perf_event.h"
struct perf_branch_stack {
__u64 nr;
__u64 hw_idx;
struct perf_branch_entry entries[];
};

struct task_struct;




struct hw_perf_event_extra {
u64 config;
unsigned int reg;
int alloc;
int idx;
};
# 146 "./include/linux/perf_event.h"
struct hw_perf_event {

union {
struct {
u64 config;
u64 last_tag;
unsigned long config_base;
unsigned long event_base;
int event_base_rdpmc;
int idx;
int last_cpu;
int flags;

struct hw_perf_event_extra extra_reg;
struct hw_perf_event_extra branch_reg;
};
struct {
struct hrtimer hrtimer;
};
struct {

struct list_head tp_list;
};
struct {
u64 pwr_acc;
u64 ptsc;
};
# 184 "./include/linux/perf_event.h"
struct {
u8 iommu_bank;
u8 iommu_cntr;
u16 padding;
u64 conf;
u64 conf1;
};
};




struct task_struct *target;





void *addr_filters;


unsigned long addr_filters_gen;
# 214 "./include/linux/perf_event.h"
int state;





local64_t prev_count;




u64 sample_period;

union {
struct {



u64 last_period;







local64_t period_left;
};
struct {
u64 saved_metric;
u64 saved_slots;
};
};





u64 interrupts_seq;
u64 interrupts;





u64 freq_time_stamp;
u64 freq_count_stamp;

};

struct perf_event;
# 286 "./include/linux/perf_event.h"
struct perf_output_handle;




struct pmu {
struct list_head entry;

struct module *module;
struct device *dev;
const struct attribute_group **attr_groups;
const struct attribute_group **attr_update;
const char *name;
int type;




int capabilities;

int *pmu_disable_count;
struct perf_cpu_context *pmu_cpu_context;
atomic_t exclusive_cnt;
int task_ctx_nr;
int hrtimer_interval_ms;


unsigned int nr_addr_filters;





void (*pmu_enable) (struct pmu *pmu);
void (*pmu_disable) (struct pmu *pmu);
# 338 "./include/linux/perf_event.h"
int (*event_init) (struct perf_event *event);





void (*event_mapped) (struct perf_event *event, struct mm_struct *mm);
void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm);
# 373 "./include/linux/perf_event.h"
int (*add) (struct perf_event *event, int flags);
void (*del) (struct perf_event *event, int flags);
# 394 "./include/linux/perf_event.h"
void (*start) (struct perf_event *event, int flags);
void (*stop) (struct perf_event *event, int flags);







void (*read) (struct perf_event *event);
# 415 "./include/linux/perf_event.h"
void (*start_txn) (struct pmu *pmu, unsigned int txn_flags);
# 424 "./include/linux/perf_event.h"
int (*commit_txn) (struct pmu *pmu);






void (*cancel_txn) (struct pmu *pmu);





int (*event_idx) (struct perf_event *event);




void (*sched_task) (struct perf_event_context *ctx,
bool sched_in);




struct kmem_cache *task_ctx_cache;







void (*swap_task_ctx) (struct perf_event_context *prev,
struct perf_event_context *next);





void *(*setup_aux) (struct perf_event *event, void **pages,
int nr_pages, bool overwrite);





void (*free_aux) (void *aux);
# 481 "./include/linux/perf_event.h"
long (*snapshot_aux) (struct perf_event *event,
struct perf_output_handle *handle,
unsigned long size);
# 493 "./include/linux/perf_event.h"
int (*addr_filters_validate) (struct list_head *filters);
# 507 "./include/linux/perf_event.h"
void (*addr_filters_sync) (struct perf_event *event);
# 517 "./include/linux/perf_event.h"
int (*aux_output_match) (struct perf_event *event);





int (*filter_match) (struct perf_event *event);




int (*check_period) (struct perf_event *event, u64 value);
};
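/*
 * Minimal sketch of filling in the struct pmu just defined; every example_*
 * name is hypothetical and the callbacks are stubs. Only the basic
 * counting callbacks are shown; perf_pmu_register() (declared further down)
 * would make such a PMU visible to the core.
 */
static int example_pmu_event_init(struct perf_event *event)
{
	return 0;	/* a real PMU rejects events whose attr.type is not its own */
}

static int example_pmu_add(struct perf_event *event, int flags) { return 0; }
static void example_pmu_del(struct perf_event *event, int flags) { }
static void example_pmu_start(struct perf_event *event, int flags) { }
static void example_pmu_stop(struct perf_event *event, int flags) { }
static void example_pmu_read(struct perf_event *event) { }

static struct pmu example_pmu = {
	.event_init	= example_pmu_event_init,
	.add		= example_pmu_add,
	.del		= example_pmu_del,
	.start		= example_pmu_start,
	.stop		= example_pmu_stop,
	.read		= example_pmu_read,
};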

enum perf_addr_filter_action_t {
PERF_ADDR_FILTER_ACTION_STOP = 0,
PERF_ADDR_FILTER_ACTION_START,
PERF_ADDR_FILTER_ACTION_FILTER,
};
# 547 "./include/linux/perf_event.h"
struct perf_addr_filter {
struct list_head entry;
struct path path;
unsigned long offset;
unsigned long size;
enum perf_addr_filter_action_t action;
};
# 565 "./include/linux/perf_event.h"
struct perf_addr_filters_head {
struct list_head list;
raw_spinlock_t lock;
unsigned int nr_file_filters;
};

struct perf_addr_filter_range {
unsigned long start;
unsigned long size;
};




enum perf_event_state {
PERF_EVENT_STATE_DEAD = -4,
PERF_EVENT_STATE_EXIT = -3,
PERF_EVENT_STATE_ERROR = -2,
PERF_EVENT_STATE_OFF = -1,
PERF_EVENT_STATE_INACTIVE = 0,
PERF_EVENT_STATE_ACTIVE = 1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
struct perf_sample_data *,
struct pt_regs *regs);
# 612 "./include/linux/perf_event.h"
struct swevent_hlist {
struct hlist_head heads[(1 << 8)];
struct callback_head callback_head;
};
# 625 "./include/linux/perf_event.h"
struct bpf_prog;
struct perf_cgroup;
struct perf_buffer;

struct pmu_event_list {
raw_spinlock_t lock;
struct list_head list;
};
# 641 "./include/linux/perf_event.h"
struct perf_event {






struct list_head event_entry;





struct list_head sibling_list;
struct list_head active_list;



struct rb_node group_node;
u64 group_index;





struct list_head migrate_entry;

struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;


int event_caps;

int group_caps;

struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;

enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;







u64 total_time_enabled;
u64 total_time_running;
u64 tstamp;

struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;

struct perf_event_context *ctx;
atomic_long_t refcount;





atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;




struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;

int oncpu;
int cpu;

struct list_head owner_entry;
struct task_struct *owner;


struct mutex mmap_mutex;
atomic_t mmap_count;

struct perf_buffer *rb;
struct list_head rb_entry;
unsigned long rcu_batches;
int rcu_pending;


wait_queue_head_t waitq;
struct fasync_struct *fasync;


int pending_wakeup;
int pending_kill;
int pending_disable;
unsigned long pending_addr;
struct irq_work pending;

atomic_t event_limit;


struct perf_addr_filters_head addr_filters;

struct perf_addr_filter_range *addr_filter_ranges;
unsigned long addr_filters_gen;


struct perf_event *aux_event;

void (*destroy)(struct perf_event *);
struct callback_head callback_head;

struct pid_namespace *ns;
u64 id;

u64 (*clock)(void);
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;

perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
u64 bpf_cookie;



struct trace_event_call *tp_event;
struct event_filter *filter;

struct ftrace_ops ftrace_ops;
# 786 "./include/linux/perf_event.h"
struct list_head sb_list;

};


struct perf_event_groups {
struct rb_root tree;
u64 index;
};






struct perf_event_context {
struct pmu *pmu;




raw_spinlock_t lock;





struct mutex mutex;

struct list_head active_ctx_list;
struct perf_event_groups pinned_groups;
struct perf_event_groups flexible_groups;
struct list_head event_list;

struct list_head pinned_active;
struct list_head flexible_active;

int nr_events;
int nr_active;
int nr_user;
int is_active;
int nr_stat;
int nr_freq;
int rotate_disable;




int rotate_necessary;
refcount_t refcount;
struct task_struct *task;




u64 time;
u64 timestamp;
u64 timeoffset;





struct perf_event_context *parent_ctx;
u64 parent_gen;
u64 generation;
int pin_count;



void *task_ctx_data;
struct callback_head callback_head;
};
# 869 "./include/linux/perf_event.h"
struct perf_cpu_context {
struct perf_event_context ctx;
struct perf_event_context *task_ctx;
int active_oncpu;
int exclusive;

raw_spinlock_t hrtimer_lock;
struct hrtimer hrtimer;
ktime_t hrtimer_interval;
unsigned int hrtimer_active;






struct list_head sched_cb_entry;
int sched_cb_usage;

int online;




int heap_size;
struct perf_event **heap;
struct perf_event *heap_default[2];
};

struct perf_output_handle {
struct perf_event *event;
struct perf_buffer *rb;
unsigned long wakeup;
unsigned long size;
u64 aux_flags;
union {
void *addr;
unsigned long head;
};
int page;
};

struct bpf_perf_event_data_kern {
bpf_user_pt_regs_t *regs;
struct perf_sample_data *data;
struct perf_event *event;
};
# 952 "./include/linux/perf_event.h"
extern void *perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
unsigned long size);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
extern void perf_event_itrace_started(struct perf_event *event);

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern void __perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child, u64 clone_flags);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event *perf_get_event(struct file *file);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);

extern void perf_pmu_resched(struct pmu *pmu);

extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
int cpu,
struct task_struct *task,
perf_overflow_handler_t callback,
void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
int src_cpu, int dst_cpu);
int perf_event_read_local(struct perf_event *event, u64 *value,
u64 *enabled, u64 *running);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
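/*
 * Sketch using perf_event_create_kernel_counter() as declared above to count
 * CPU cycles on CPU 0; the function name is hypothetical. PERF_TYPE_HARDWARE
 * and PERF_COUNT_HW_CPU_CYCLES come from the uapi perf_event.h pulled in
 * earlier; a ((void *)0) task means "count on the CPU regardless of task"
 * and a ((void *)0) overflow handler means "just count".
 */
static struct perf_event *example_start_cycle_counter(void)
{
	static struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.size	= sizeof(struct perf_event_attr),
	};

	/* on success the counter is live; read it via perf_event_read_value() */
	return perf_event_create_kernel_counter(&attr, 0, ((void *)0),
						((void *)0), ((void *)0));
}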


struct perf_sample_data {




u64 addr;
struct perf_raw_record *raw;
struct perf_branch_stack *br_stack;
u64 period;
union perf_sample_weight weight;
u64 txn;
union perf_mem_data_src data_src;





u64 type;
u64 ip;
struct {
u32 pid;
u32 tid;
} tid_entry;
u64 time;
u64 id;
u64 stream_id;
struct {
u32 cpu;
u32 reserved;
} cpu_entry;
struct perf_callchain_entry *callchain;
u64 aux_size;

struct perf_regs regs_user;
struct perf_regs regs_intr;
u64 stack_user_size;

u64 phys_addr;
u64 cgroup;
u64 data_page_size;
u64 code_page_size;
} __attribute__((__aligned__((1 << 6))));
# 1053 "./include/linux/perf_event.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void perf_sample_data_init(struct perf_sample_data *data,
u64 addr, u64 period)
{

data->addr = addr;
data->raw = ((void *)0);
data->br_stack = ((void *)0);
data->period = period;
data->weight.full = 0;
data->data_src.val = ((((__u64)0x01) << 0) | (((__u64)0x01) << 5) | (((__u64)0x01) << 19) | (((__u64)0x01) << 24) | (((__u64)0x01) << 26));
data->txn = 0;
}

extern void perf_output_sample(struct perf_output_handle *handle,
struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event,
struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs);

extern void perf_event_output_forward(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs);
extern void perf_event_output_backward(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs);
extern int perf_event_output(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs);
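/*
 * Usage sketch for the sample path declared above (helper name hypothetical):
 * initialize a perf_sample_data on the stack, then hand it to the overflow
 * path together with the interrupted registers.
 */
static void example_report_overflow(struct perf_event *event,
				    struct pt_regs *regs)
{
	struct perf_sample_data data;

	perf_sample_data_init(&data, 0 /* addr */, 1 /* period */);
	perf_event_overflow(event, &data, regs);
}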

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool
is_default_overflow_handler(struct perf_event *event)
{
if (__builtin_expect(!!(event->overflow_handler == perf_event_output_forward), 1))
return true;
if (__builtin_expect(!!(event->overflow_handler == perf_event_output_backward), 0))
return true;
return false;
}

extern void
perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
struct perf_output_handle *handle,
struct perf_sample_data *sample);

extern void
perf_log_lost_samples(struct perf_event *event, u64 lost);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool event_has_any_exclude_flag(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;

return attr->exclude_idle || attr->exclude_user ||
attr->exclude_kernel || attr->exclude_hv ||
attr->exclude_guest || attr->exclude_host;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_sampling_event(struct perf_event *event)
{
return event->attr.sample_period != 0;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int is_software_event(struct perf_event *event)
{
return event->event_caps & ((((1UL))) << (0));
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int in_software_context(struct perf_event *event)
{
return event->ctx->pmu->task_ctx_nr == perf_sw_context;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int is_exclusive_pmu(struct pmu *pmu)
{
return pmu->capabilities & 0x0010;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
# 1169 "./include/linux/perf_event.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void perf_fetch_caller_regs(struct pt_regs *regs)
{
perf_arch_fetch_caller_regs(regs, ((unsigned long)__builtin_return_address(0)));
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
if (static_key_false(&perf_swevent_enabled[event_id]))
__perf_sw_event(event_id, nr, regs, addr);
}

extern __attribute__((section(".data..percpu" ""))) __typeof__(struct pt_regs) __perf_regs[4];






static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) void __perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
struct pt_regs *regs = ({ do { const void *__vpp_verify = (typeof((&__perf_regs[0]) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&__perf_regs[0])) *)(&__perf_regs[0])); (typeof((typeof(*(&__perf_regs[0])) *)(&__perf_regs[0]))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); });

perf_fetch_caller_regs(regs);
___perf_sw_event(event_id, nr, regs, addr);
}

extern struct static_key_false perf_sched_events;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool __perf_sw_enabled(int swevt)
{
return static_key_false(&perf_swevent_enabled[swevt]);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void perf_event_task_migrate(struct task_struct *task)
{
if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS))
task->sched_migrated = 1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{
if (__builtin_expect(!!(({ if (!__builtin_types_compatible_p(typeof(*&(&perf_sched_events)->key), struct static_key) && !__builtin_types_compatible_p(typeof(*&(&perf_sched_events)->key), struct static_key_true) && !__builtin_types_compatible_p(typeof(*&(&perf_sched_events)->key), struct static_key_false)) ____wrong_branch_error(); static_key_count((struct static_key *)&(&perf_sched_events)->key) > 0; })), 0))
__perf_event_task_sched_in(prev, task);

if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS) &&
task->sched_migrated) {
__perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
task->sched_migrated = 0;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next)
{
if (__perf_sw_enabled(PERF_COUNT_SW_CONTEXT_SWITCHES))
__perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
# 1235 "./include/linux/perf_event.h"
if (__builtin_expect(!!(({ if (!__builtin_types_compatible_p(typeof(*&(&perf_sched_events)->key), struct static_key) && !__builtin_types_compatible_p(typeof(*&(&perf_sched_events)->key), struct static_key_true) && !__builtin_types_compatible_p(typeof(*&(&perf_sched_events)->key), struct static_key_false)) ____wrong_branch_error(); static_key_count((struct static_key *)&(&perf_sched_events)->key) > 0; })), 0))
__perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);

extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
bool unregister, const char *sym);
extern void perf_event_bpf_event(struct bpf_prog *prog,
enum perf_bpf_event_type type,
u16 flags);
# 1269 "./include/linux/perf_event.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int perf_guest_state(void) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned long perf_guest_get_ip(void) { return 0; }
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int perf_guest_handle_intel_pt_intr(void) { return 0; }


extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_namespaces(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);
extern void perf_event_text_poke(const void *addr,
const void *old_bytes, size_t old_len,
const void *new_bytes, size_t new_len);


extern __attribute__((section(".data..percpu" ""))) __typeof__(struct perf_callchain_entry) perf_callchain_entry;

extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
u32 max_stack, bool crosstask, bool add_mark);
extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
extern void put_callchain_entry(int rctx);

extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
struct perf_callchain_entry *entry = ctx->entry;
entry->ip[entry->nr++] = ip;
++ctx->contexts;
return 0;
} else {
ctx->contexts_maxed = true;
return -1;
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
struct perf_callchain_entry *entry = ctx->entry;
entry->ip[entry->nr++] = ip;
++ctx->nr;
return 0;
} else {
return -1;
}
}

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

int perf_proc_update_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int perf_event_max_stack_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
# 1346 "./include/linux/perf_event.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int perf_is_paranoid(void)
{
return sysctl_perf_event_paranoid > -1;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int perf_allow_kernel(struct perf_event_attr *attr)
{
if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
return -13;

return security_perf_event_open(attr, 2);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int perf_allow_cpu(struct perf_event_attr *attr)
{
if (sysctl_perf_event_paranoid > 0 && !perfmon_capable())
return -13;

return security_perf_event_open(attr, 1);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) int perf_allow_tracepoint(struct perf_event_attr *attr)
{
if (sysctl_perf_event_paranoid > -1 && !perfmon_capable())
return -1;

return security_perf_event_open(attr, 3);
}

extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record,
int entry_size, struct pt_regs *regs,
struct hlist_head *head, int rctx,
struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);
# 1391 "./include/linux/perf_event.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool has_branch_stack(struct perf_event *event)
{
return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool needs_branch_stack(struct perf_event *event)
{
return event->attr.branch_sample_type != 0;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool has_aux(struct perf_event *event)
{
return event->pmu->setup_aux;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool is_write_backward(struct perf_event *event)
{
return !!event->attr.write_backward;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool has_addr_filter(struct perf_event *event)
{
return event->pmu->nr_addr_filters;
}




static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct perf_addr_filters_head *
perf_event_addr_filters(struct perf_event *event)
{
struct perf_addr_filters_head *ifh = &event->addr_filters;

if (event->parent)
ifh = &event->parent->addr_filters;

return ifh;
}

extern void perf_event_addr_filters_sync(struct perf_event *event);
extern void perf_report_aux_output_id(struct perf_event *event, u64 hw_id);

extern int perf_output_begin(struct perf_output_handle *handle,
struct perf_sample_data *data,
struct perf_event *event, unsigned int size);
extern int perf_output_begin_forward(struct perf_output_handle *handle,
struct perf_sample_data *data,
struct perf_event *event,
unsigned int size);
extern int perf_output_begin_backward(struct perf_output_handle *handle,
struct perf_sample_data *data,
struct perf_event *event,
unsigned int size);

extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
unsigned int len);
extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
struct perf_output_handle *handle,
unsigned long from, unsigned long to);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
extern int perf_event_account_interrupt(struct perf_event *event);
extern int perf_event_period(struct perf_event *event, u64 value);
extern u64 perf_event_pause(struct perf_event *event, bool reset);
# 1555 "./include/linux/perf_event.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void perf_restore_debug_store(void) { }


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool perf_raw_frag_last(const struct perf_raw_frag *frag)
{
return frag->pad < sizeof(u64);
}



struct perf_pmu_events_attr {
struct device_attribute attr;
u64 id;
const char *event_str;
};

struct perf_pmu_events_ht_attr {
struct device_attribute attr;
u64 id;
const char *event_str_ht;
const char *event_str_noht;
};

struct perf_pmu_events_hybrid_attr {
struct device_attribute attr;
u64 id;
const char *event_str;
u64 pmu_type;
};

struct perf_pmu_format_hybrid_attr {
struct device_attribute attr;
u64 pmu_type;
};

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page);
# 1626 "./include/linux/perf_event.h"
int perf_event_init_cpu(unsigned int cpu);
int perf_event_exit_cpu(unsigned int cpu);





extern void __attribute__((__weak__)) arch_perf_update_userpage(struct perf_event *event,
struct perf_event_mmap_page *userpg,
u64 now);


extern __attribute__((__weak__)) u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr);
# 1659 "./include/linux/perf_event.h"
typedef int (perf_snapshot_branch_stack_t)(struct perf_branch_entry *entries,
unsigned int cnt);
extern struct static_call_key __SCK__perf_snapshot_branch_stack; extern typeof(perf_snapshot_branch_stack_t) __SCT__perf_snapshot_branch_stack;;
# 11 "./include/linux/trace_events.h" 2
# 1 "./include/linux/tracepoint.h" 1
# 12 "./include/linux/trace_events.h" 2

struct trace_array;
struct array_buffer;
struct tracer;
struct dentry;
struct bpf_prog;
union bpf_attr;

const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
unsigned long flags,
const struct trace_print_flags *flag_array);

const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
const struct trace_print_flags *symbol_array);
# 38 "./include/linux/trace_events.h"
const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
unsigned int bitmask_size);

const char *trace_print_hex_seq(struct trace_seq *p,
const unsigned char *buf, int len,
bool concatenate);

const char *trace_print_array_seq(struct trace_seq *p,
const void *buf, int count,
size_t el_size);

const char *
trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
const void *buf, size_t len, bool ascii);

struct trace_iterator;
struct trace_event;

int trace_raw_output_prep(struct trace_iterator *iter,
struct trace_event *event);
extern __attribute__((__format__(printf, 2, 3)))
void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);







struct trace_entry {
unsigned short type;
unsigned char flags;
unsigned char preempt_count;
int pid;
};
# 82 "./include/linux/trace_events.h"
struct trace_iterator {
struct trace_array *tr;
struct tracer *trace;
struct array_buffer *array_buffer;
void *private;
int cpu_file;
struct mutex mutex;
struct ring_buffer_iter **buffer_iter;
unsigned long iter_flags;
void *temp;
unsigned int temp_size;
char *fmt;
unsigned int fmt_size;


struct trace_seq tmp_seq;

cpumask_var_t started;


bool snapshot;


struct trace_seq seq;
struct trace_entry *ent;
unsigned long lost_events;
int leftover;
int ent_size;
int cpu;
u64 ts;

loff_t pos;
long idx;


};

enum trace_iter_flags {
TRACE_FILE_LAT_FMT = 1,
TRACE_FILE_ANNOTATE = 2,
TRACE_FILE_TIME_IN_NS = 4,
};


typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
int flags, struct trace_event *event);

struct trace_event_functions {
trace_print_func trace;
trace_print_func raw;
trace_print_func hex;
trace_print_func binary;
};

struct trace_event {
struct hlist_node node;
struct list_head list;
int type;
struct trace_event_functions *funcs;
};

extern int register_trace_event(struct trace_event *event);
extern int unregister_trace_event(struct trace_event *event);


enum print_line_t {
TRACE_TYPE_PARTIAL_LINE = 0,
TRACE_TYPE_HANDLED = 1,
TRACE_TYPE_UNHANDLED = 2,
TRACE_TYPE_NO_CONSUME = 3
};

enum print_line_t trace_handle_return(struct trace_seq *s);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void tracing_generic_entry_update(struct trace_entry *entry,
unsigned short type,
unsigned int trace_ctx)
{
entry->preempt_count = trace_ctx & 0xff;
entry->pid = get_current()->pid;
entry->type = type;
entry->flags = trace_ctx >> 16;
}

unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);

enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
TRACE_FLAG_NEED_RESCHED = 0x04,
TRACE_FLAG_HARDIRQ = 0x08,
TRACE_FLAG_SOFTIRQ = 0x10,
TRACE_FLAG_PREEMPT_RESCHED = 0x20,
TRACE_FLAG_NMI = 0x40,
TRACE_FLAG_BH_OFF = 0x80,
};


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
{
unsigned int irq_status = ({ ({ unsigned long __dummy; typeof(irqflags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(irqflags); }) ?
TRACE_FLAG_IRQS_OFF : 0;
return tracing_gen_ctx_irq_test(irq_status);
}
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int tracing_gen_ctx(void)
{
unsigned long irqflags;

do { ({ unsigned long __dummy; typeof(irqflags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); irqflags = arch_local_save_flags(); } while (0);
return tracing_gen_ctx_flags(irqflags);
}
# 205 "./include/linux/trace_events.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) unsigned int tracing_gen_ctx_dec(void)
{
unsigned int trace_ctx;

trace_ctx = tracing_gen_ctx();




if (0)
trace_ctx--;
return trace_ctx;
}

struct trace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
struct trace_event_file *trace_file,
int type, unsigned long len,
unsigned int trace_ctx);




void tracing_record_taskinfo(struct task_struct *task, int flags);
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
struct task_struct *next, int flags);

void tracing_record_cmdline(struct task_struct *task);
void tracing_record_tgid(struct task_struct *task);

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);

struct event_filter;

enum trace_reg {
TRACE_REG_REGISTER,
TRACE_REG_UNREGISTER,

TRACE_REG_PERF_REGISTER,
TRACE_REG_PERF_UNREGISTER,
TRACE_REG_PERF_OPEN,
TRACE_REG_PERF_CLOSE,





TRACE_REG_PERF_ADD,
TRACE_REG_PERF_DEL,

};

struct trace_event_call;



struct trace_event_fields {
const char *type;
union {
struct {
const char *name;
const int size;
const int align;
const int is_signed;
const int filter_type;
};
int (*define_fields)(struct trace_event_call *);
};
};

struct trace_event_class {
const char *system;
void *probe;

void *perf_probe;

int (*reg)(struct trace_event_call *event,
enum trace_reg type, void *data);
struct trace_event_fields *fields_array;
struct list_head *(*get_fields)(struct trace_event_call *);
struct list_head fields;
int (*raw_init)(struct trace_event_call *);
};

extern int trace_event_reg(struct trace_event_call *event,
enum trace_reg type, void *data);

struct trace_event_buffer {
struct trace_buffer *buffer;
struct ring_buffer_event *event;
struct trace_event_file *trace_file;
void *entry;
unsigned int trace_ctx;
struct pt_regs *regs;
};

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
struct trace_event_file *trace_file,
unsigned long len);

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
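/*
 * Sketch of the reserve/commit pair just declared (helper name hypothetical):
 * reserve space in the ring buffer, fill the event payload, commit. The
 * common trace_entry header is already filled in by the reserve step.
 */
static void example_write_trace_event(struct trace_event_file *trace_file)
{
	struct trace_event_buffer fbuffer;
	struct trace_entry *entry;

	entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry));
	if (!entry)
		return;	/* buffer full, or the event is soft-disabled */
	/* event-specific fields would be written here */
	trace_event_buffer_commit(&fbuffer);
}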

enum {
TRACE_EVENT_FL_FILTERED_BIT,
TRACE_EVENT_FL_CAP_ANY_BIT,
TRACE_EVENT_FL_NO_SET_FILTER_BIT,
TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
TRACE_EVENT_FL_TRACEPOINT_BIT,
TRACE_EVENT_FL_DYNAMIC_BIT,
TRACE_EVENT_FL_KPROBE_BIT,
TRACE_EVENT_FL_UPROBE_BIT,
TRACE_EVENT_FL_EPROBE_BIT,
TRACE_EVENT_FL_CUSTOM_BIT,
};
# 337 "./include/linux/trace_events.h"
enum {
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
TRACE_EVENT_FL_DYNAMIC = (1 << TRACE_EVENT_FL_DYNAMIC_BIT),
TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT),
TRACE_EVENT_FL_EPROBE = (1 << TRACE_EVENT_FL_EPROBE_BIT),
TRACE_EVENT_FL_CUSTOM = (1 << TRACE_EVENT_FL_CUSTOM_BIT),
};



struct trace_event_call {
struct list_head list;
struct trace_event_class *class;
union {
char *name;

struct tracepoint *tp;
};
struct trace_event event;
char *print_fmt;
struct event_filter *filter;




union {
void *module;
atomic_t refcnt;
};
void *data;


int flags;


int perf_refcount;
struct hlist_head *perf_events;
struct bpf_prog_array *prog_array;

int (*perf_perm)(struct trace_event_call *,
struct perf_event *);

};


bool trace_event_dyn_try_get_ref(struct trace_event_call *call);
void trace_event_dyn_put_ref(struct trace_event_call *call);
bool trace_event_dyn_busy(struct trace_event_call *call);
# 406 "./include/linux/trace_events.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool trace_event_try_get_ref(struct trace_event_call *call)
{
if (call->flags & TRACE_EVENT_FL_DYNAMIC)
return trace_event_dyn_try_get_ref(call);
else
return try_module_get(call->module);
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void trace_event_put_ref(struct trace_event_call *call)
{
if (call->flags & TRACE_EVENT_FL_DYNAMIC)
trace_event_dyn_put_ref(call);
else
module_put(call->module);
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) bool bpf_prog_array_valid(struct trace_event_call *call)
{
# 442 "./include/linux/trace_events.h"
return !!({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_643(void) ; if (!((sizeof(call->prog_array) == sizeof(char) || sizeof(call->prog_array) == sizeof(short) || sizeof(call->prog_array) == sizeof(int) || sizeof(call->prog_array) == sizeof(long)) || sizeof(call->prog_array) == sizeof(long long))) __compiletime_assert_643(); } while (0); (*(const volatile typeof( _Generic((call->prog_array), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (call->prog_array))) *)&(call->prog_array)); });
}


static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const char *
trace_event_name(struct trace_event_call *call)
{
if (call->flags & TRACE_EVENT_FL_CUSTOM)
return call->name;
else if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
return call->tp ? call->tp->name : ((void *)0);
else
return call->name;
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
if (!event_call->class->get_fields)
return &event_call->class->fields;
return event_call->class->get_fields(event_call);
}

struct trace_subsystem_dir;

enum {
EVENT_FILE_FL_ENABLED_BIT,
EVENT_FILE_FL_RECORDED_CMD_BIT,
EVENT_FILE_FL_RECORDED_TGID_BIT,
EVENT_FILE_FL_FILTERED_BIT,
EVENT_FILE_FL_NO_SET_FILTER_BIT,
EVENT_FILE_FL_SOFT_MODE_BIT,
EVENT_FILE_FL_SOFT_DISABLED_BIT,
EVENT_FILE_FL_TRIGGER_MODE_BIT,
EVENT_FILE_FL_TRIGGER_COND_BIT,
EVENT_FILE_FL_PID_FILTER_BIT,
EVENT_FILE_FL_WAS_ENABLED_BIT,
};

extern struct trace_event_file *trace_get_event_file(const char *instance,
const char *system,
const char *event);
extern void trace_put_event_file(struct trace_event_file *file);
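/*
 * Sketch of the get/put pair above; "sched"/"sched_switch" are real event
 * names but the helper is hypothetical. A ((void *)0) instance means the
 * top-level tracing instance, and the return is an ERR_PTR() on failure.
 */
static struct trace_event_file *example_get_sched_switch(void)
{
	struct trace_event_file *file;

	file = trace_get_event_file(((void *)0), "sched", "sched_switch");
	if (IS_ERR(file))
		return ((void *)0);
	/* the caller owns a reference; drop it with trace_put_event_file() */
	return file;
}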



enum dynevent_type {
DYNEVENT_TYPE_SYNTH = 1,
DYNEVENT_TYPE_KPROBE,
DYNEVENT_TYPE_NONE,
};

struct dynevent_cmd;

typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd);

struct dynevent_cmd {
struct seq_buf seq;
const char *event_name;
unsigned int n_fields;
enum dynevent_type type;
dynevent_create_fn_t run_command;
void *private_data;
};

extern int dynevent_create(struct dynevent_cmd *cmd);

extern int synth_event_delete(const char *name);

extern void synth_event_cmd_init(struct dynevent_cmd *cmd,
char *buf, int maxlen);

extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd,
const char *name,
struct module *mod, ...);




struct synth_field_desc {
const char *type;
const char *name;
};

extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd,
const char *name,
struct module *mod,
struct synth_field_desc *fields,
unsigned int n_fields);
extern int synth_event_create(const char *name,
struct synth_field_desc *fields,
unsigned int n_fields, struct module *mod);

extern int synth_event_add_field(struct dynevent_cmd *cmd,
const char *type,
const char *name);
extern int synth_event_add_field_str(struct dynevent_cmd *cmd,
const char *type_name);
extern int synth_event_add_fields(struct dynevent_cmd *cmd,
struct synth_field_desc *fields,
unsigned int n_fields);




struct synth_event;

struct synth_event_trace_state {
struct trace_event_buffer fbuffer;
struct synth_trace_event *entry;
struct trace_buffer *buffer;
struct synth_event *event;
unsigned int cur_field;
unsigned int n_u64;
bool disabled;
bool add_next;
bool add_name;
};

extern int synth_event_trace(struct trace_event_file *file,
unsigned int n_vals, ...);
extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
unsigned int n_vals);
extern int synth_event_trace_start(struct trace_event_file *file,
struct synth_event_trace_state *trace_state);
extern int synth_event_add_next_val(u64 val,
struct synth_event_trace_state *trace_state);
extern int synth_event_add_val(const char *field_name, u64 val,
struct synth_event_trace_state *trace_state);
extern int synth_event_trace_end(struct synth_event_trace_state *trace_state);
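/*
 * Sketch of the piecewise synthetic-event trace flow declared above (helper
 * name and two-field layout hypothetical): start, push values in field
 * order, end.
 */
static int example_trace_two_vals(struct trace_event_file *file, u64 a, u64 b)
{
	struct synth_event_trace_state state;
	int ret;

	ret = synth_event_trace_start(file, &state);
	if (ret)
		return ret;
	if (synth_event_add_next_val(a, &state) == 0)
		synth_event_add_next_val(b, &state);
	/* end is called once start succeeded, even if adding a value failed */
	return synth_event_trace_end(&state);
}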

extern int kprobe_event_delete(const char *name);

extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd,
char *buf, int maxlen);







extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd,
bool kretprobe,
const char *name,
const char *loc, ...);







extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
# 618 "./include/linux/trace_events.h"
enum {
EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
EVENT_FILE_FL_RECORDED_CMD = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
EVENT_FILE_FL_RECORDED_TGID = (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
EVENT_FILE_FL_FILTERED = (1 << EVENT_FILE_FL_FILTERED_BIT),
EVENT_FILE_FL_NO_SET_FILTER = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
EVENT_FILE_FL_SOFT_MODE = (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
EVENT_FILE_FL_WAS_ENABLED = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
};
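/*
 * Each EVENT_FILE_FL_* mask above is (1 << EVENT_FILE_FL_*_BIT)
 * from the earlier enum; the masks are what flag tests such as
 * trace_trigger_soft_disabled() below operate on.
 */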

struct trace_event_file {
struct list_head list;
struct trace_event_call *event_call;
struct event_filter *filter;
struct dentry *dir;
struct trace_array *tr;
struct trace_subsystem_dir *system;
struct list_head triggers;
# 657 "./include/linux/trace_events.h"
unsigned long flags;
atomic_t sm_ref;
atomic_t tm_ref;
};
# 687 "./include/linux/trace_events.h"
enum event_trigger_type {
ETT_NONE = (0),
ETT_TRACE_ONOFF = (1 << 0),
ETT_SNAPSHOT = (1 << 1),
ETT_STACKTRACE = (1 << 2),
ETT_EVENT_ENABLE = (1 << 3),
ETT_EVENT_HIST = (1 << 4),
ETT_HIST_ENABLE = (1 << 5),
ETT_EVENT_EPROBE = (1 << 6),
};

extern int filter_match_preds(struct event_filter *filter, void *rec);

extern enum event_trigger_type
event_triggers_call(struct trace_event_file *file,
struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event);
extern void
event_triggers_post_call(struct trace_event_file *file,
enum event_trigger_type tt);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);

bool __trace_trigger_soft_disabled(struct trace_event_file *file);
# 721 "./include/linux/trace_events.h"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((__always_inline__)) bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
unsigned long eflags = file->flags;

if (__builtin_expect(!!(!(eflags & (EVENT_FILE_FL_TRIGGER_MODE | EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_PID_FILTER))), 1))


return false;

if (__builtin_expect(!!(eflags & EVENT_FILE_FL_TRIGGER_COND), 1))
return false;

return __trace_trigger_soft_disabled(file);
}
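/*
 * The __builtin_expect(!!(cond), 1) forms above are the expanded
 * remains of the kernel's likely() annotation: both early returns
 * in trace_trigger_soft_disabled() are the expected fast path.
 */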


unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void *info);
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
u32 *fd_type, const char **buf,
u64 *probe_offset, u64 *probe_addr);
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
# 798 "./include/linux/trace_events.h"
enum {
FILTER_OTHER = 0,
FILTER_STATIC_STRING,
FILTER_DYN_STRING,
FILTER_RDYN_STRING,
FILTER_PTR_STRING,
FILTER_TRACE_FN,
FILTER_COMM,
FILTER_CPU,
};

extern int trace_event_raw_init(struct trace_event_call *call);
extern int trace_define_field(struct trace_event_call *call, const char *type,
const char *name, int offset, int size,
int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);



int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
int trace_set_clr_event(const char *system, const char *event, int set);
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
const char *event, bool enable);
# 843 "./include/linux/trace_events.h"
struct perf_event;

extern __attribute__((section(".data..percpu" ""))) __typeof__(struct pt_regs) perf_trace_regs;
extern __attribute__((section(".data..percpu" ""))) __typeof__(int) bpf_kprobe_override;

extern int perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);

extern int perf_kprobe_init(struct perf_event *event, bool is_retprobe);
extern void perf_kprobe_destroy(struct perf_event *event);
extern int bpf_get_kprobe_info(const struct perf_event *event,
u32 *fd_type, const char **symbol,
u64 *probe_offset, u64 *probe_addr,
bool perf_type_tracepoint);


extern int perf_uprobe_init(struct perf_event *event,
unsigned long ref_ctr_offset, bool is_retprobe);
extern void perf_uprobe_destroy(struct perf_event *event);
extern int bpf_get_uprobe_info(const struct perf_event *event,
u32 *fd_type, const char **filename,
u64 *probe_offset, bool perf_type_tracepoint);

extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);

int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_free_bpf_prog(struct perf_event *event);

void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
u64 arg3);
void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2,
u64 arg3, u64 arg4);
void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5);
void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6);
void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
u64 arg8);
void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
u64 arg8, u64 arg9);
void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
u64 arg8, u64 arg9, u64 arg10);
void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
u64 arg8, u64 arg9, u64 arg10, u64 arg11);
void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2,
u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
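/*
 * All of the bpf_trace_runN() helpers above take their tracepoint
 * arguments as u64. Under the riscv LP64 calling convention only
 * the first eight integer arguments travel in registers a0-a7;
 * bpf_trace_run8() and the higher-arity variants (nine or more
 * parameters, counting prog) therefore rely on the remaining
 * arguments being passed on the stack.
 */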
void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
struct trace_event_call *call, u64 count,
struct pt_regs *regs, struct hlist_head *head,
struct task_struct *task);

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
u64 count, struct pt_regs *regs, void *head,
struct task_struct *task)
{
perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
}
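/*
 * What follows is the staged expansion of the fib6_table_lookup
 * tracepoint: include/trace/trace_events.h re-includes
 * include/trace/events/fib6.h once per include/trace/stages pass
 * (stage1 struct define, stage2 data offsets, stage3 trace output,
 * stage4 event fields, stage5 get offsets, stage6 event callback,
 * stage7 class define), then once more each for
 * include/trace/perf.h and include/trace/bpf_probe.h, as the line
 * markers record.
 */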
# 22 "./include/trace/trace_events.h" 2





# 1 "./include/trace/stages/init.h" 1
# 11 "./include/trace/stages/init.h"
static const char str__fib6__trace_system_name[] = "fib6";
# 28 "./include/trace/trace_events.h" 2
# 48 "./include/trace/trace_events.h"
# 1 "./include/trace/stages/stage1_struct_define.h" 1
# 49 "./include/trace/trace_events.h" 2
# 94 "./include/trace/trace_events.h"
# 1 "./include/trace/events/fib6.h" 1
# 11 "./include/trace/events/fib6.h"
# 1 "./include/linux/tracepoint.h" 1
# 12 "./include/trace/events/fib6.h" 2

struct trace_event_raw_fib6_table_lookup { struct trace_entry ent; u32 tb_id; int err; int oif; int iif; __u8 tos; __u8 scope; __u8 flags; __u8 src[16]; __u8 dst[16]; u16 sport; u16 dport; u8 proto; u8 rt_type; u32 __data_loc_name; __u8 gw[16]; char __data[]; }; static struct trace_event_class event_class_fib6_table_lookup;; static struct trace_event_call __attribute__((__used__)) __attribute__((__aligned__(4))) event_fib6_table_lookup;;
# 92 "./include/trace/events/fib6.h"
# 1 "./include/trace/define_trace.h" 1
# 93 "./include/trace/events/fib6.h" 2
# 95 "./include/trace/trace_events.h" 2
# 112 "./include/trace/trace_events.h"
# 1 "./include/trace/stages/stage2_data_offsets.h" 1
# 113 "./include/trace/trace_events.h" 2
# 132 "./include/trace/trace_events.h"
# 1 "./include/trace/events/fib6.h" 1
# 11 "./include/trace/events/fib6.h"
# 1 "./include/linux/tracepoint.h" 1
# 12 "./include/trace/events/fib6.h" 2

struct trace_event_data_offsets_fib6_table_lookup { u32 name;; };; ;;
# 92 "./include/trace/events/fib6.h"
# 1 "./include/trace/define_trace.h" 1
# 93 "./include/trace/events/fib6.h" 2
# 133 "./include/trace/trace_events.h" 2
# 184 "./include/trace/trace_events.h"
# 1 "./include/trace/stages/stage3_trace_output.h" 1
# 185 "./include/trace/trace_events.h" 2
# 237 "./include/trace/trace_events.h"
# 1 "./include/trace/events/fib6.h" 1
# 11 "./include/trace/events/fib6.h"
# 1 "./include/linux/tracepoint.h" 1
# 12 "./include/trace/events/fib6.h" 2

static __attribute__((patchable_function_entry(0, 0))) enum print_line_t trace_raw_output_fib6_table_lookup(struct trace_iterator *iter, int flags, struct trace_event *trace_event) { struct trace_seq *s = &iter->seq; struct trace_seq __attribute__((__unused__)) *p = &iter->tmp_seq; struct trace_event_raw_fib6_table_lookup *field; int ret; field = (typeof(field))iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != TRACE_TYPE_HANDLED) return ret; trace_event_printf(iter, "table %3u oif %d iif %d proto %u %pI6c/%u -> %pI6c/%u tos %d scope %d flags %x ==> dev %s gw %pI6c err %d" "\n", field->tb_id, field->oif, field->iif, field->proto, field->src, field->sport, field->dst, field->dport, field->tos, field->scope, field->flags, ((char *)((void *)field + (field->__data_loc_name & 0xffff))), field->gw, field->err); return trace_handle_return(s); } static struct trace_event_functions trace_event_type_funcs_fib6_table_lookup = { .trace = trace_raw_output_fib6_table_lookup, };; ;;
# 92 "./include/trace/events/fib6.h"
# 1 "./include/trace/define_trace.h" 1
# 93 "./include/trace/events/fib6.h" 2
# 238 "./include/trace/trace_events.h" 2

# 1 "./include/trace/stages/stage4_event_fields.h" 1
# 240 "./include/trace/trace_events.h" 2
# 250 "./include/trace/trace_events.h"
# 1 "./include/trace/events/fib6.h" 1
# 11 "./include/trace/events/fib6.h"
# 1 "./include/linux/tracepoint.h" 1
# 12 "./include/trace/events/fib6.h" 2

static struct trace_event_fields trace_event_fields_fib6_table_lookup[] = { { .type = "u32", .name = "tb_id", .size = sizeof(u32), .align = __alignof__(u32), .is_signed = (((u32)(-1)) < (u32)1), .filter_type = FILTER_OTHER }, { .type = "int", .name = "err", .size = sizeof(int), .align = __alignof__(int), .is_signed = (((int)(-1)) < (int)1), .filter_type = FILTER_OTHER }, { .type = "int", .name = "oif", .size = sizeof(int), .align = __alignof__(int), .is_signed = (((int)(-1)) < (int)1), .filter_type = FILTER_OTHER }, { .type = "int", .name = "iif", .size = sizeof(int), .align = __alignof__(int), .is_signed = (((int)(-1)) < (int)1), .filter_type = FILTER_OTHER }, { .type = "__u8", .name = "tos", .size = sizeof(__u8), .align = __alignof__(__u8), .is_signed = (((__u8)(-1)) < (__u8)1), .filter_type = FILTER_OTHER }, { .type = "__u8", .name = "scope", .size = sizeof(__u8), .align = __alignof__(__u8), .is_signed = (((__u8)(-1)) < (__u8)1), .filter_type = FILTER_OTHER }, { .type = "__u8", .name = "flags", .size = sizeof(__u8), .align = __alignof__(__u8), .is_signed = (((__u8)(-1)) < (__u8)1), .filter_type = FILTER_OTHER }, { .type = "__u8""[""16""]", .name = "src", .size = sizeof(__u8[16]), .align = __alignof__(__u8), .is_signed = (((__u8)(-1)) < (__u8)1), .filter_type = FILTER_OTHER }, { .type = "__u8""[""16""]", .name = "dst", .size = sizeof(__u8[16]), .align = __alignof__(__u8), .is_signed = (((__u8)(-1)) < (__u8)1), .filter_type = FILTER_OTHER }, { .type = "u16", .name = "sport", .size = sizeof(u16), .align = __alignof__(u16), .is_signed = (((u16)(-1)) < (u16)1), .filter_type = FILTER_OTHER }, { .type = "u16", .name = "dport", .size = sizeof(u16), .align = __alignof__(u16), .is_signed = (((u16)(-1)) < (u16)1), .filter_type = FILTER_OTHER }, { .type = "u8", .name = "proto", .size = sizeof(u8), .align = __alignof__(u8), .is_signed = (((u8)(-1)) < (u8)1), .filter_type = FILTER_OTHER }, { .type = "u8", .name = "rt_type", .size = sizeof(u8), .align = __alignof__(u8), .is_signed = (((u8)(-1)) < (u8)1), .filter_type = FILTER_OTHER }, { .type = "__data_loc " "char" "[]", .name = "name", .size = 4, .align = 4, .is_signed = (((char)(-1)) < (char)1), .filter_type = FILTER_OTHER }, { .type = "__u8""[""16""]", .name = "gw", .size = sizeof(__u8[16]), .align = __alignof__(__u8), .is_signed = (((__u8)(-1)) < (__u8)1), .filter_type = FILTER_OTHER }, {} };; ;;
# 92 "./include/trace/events/fib6.h"
# 1 "./include/trace/define_trace.h" 1
# 93 "./include/trace/events/fib6.h" 2
# 251 "./include/trace/trace_events.h" 2

# 1 "./include/trace/stages/stage5_get_offsets.h" 1
# 253 "./include/trace/trace_events.h" 2
# 268 "./include/trace/trace_events.h"
# 1 "./include/trace/events/fib6.h" 1
# 11 "./include/trace/events/fib6.h"
# 1 "./include/linux/tracepoint.h" 1
# 12 "./include/trace/events/fib6.h" 2

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) __attribute__((patchable_function_entry(0, 0))) int trace_event_get_offsets_fib6_table_lookup( struct trace_event_data_offsets_fib6_table_lookup *__data_offsets, const struct net *net, const struct fib6_result *res, struct fib6_table *table, const struct flowi6 *flp) { int __data_size = 0; int __attribute__((__unused__)) __item_length; struct trace_event_raw_fib6_table_lookup __attribute__((__unused__)) *entry; __item_length = (16) * sizeof(char); __data_offsets->name = __data_size + __builtin_offsetof(typeof(*entry), __data); __data_offsets->name |= __item_length << 16; __data_size += __item_length;; return __data_size; }; ;;
# 92 "./include/trace/events/fib6.h"
# 1 "./include/trace/define_trace.h" 1
# 93 "./include/trace/events/fib6.h" 2
# 269 "./include/trace/trace_events.h" 2
# 375 "./include/trace/trace_events.h"
# 1 "./include/trace/stages/stage6_event_callback.h" 1
# 376 "./include/trace/trace_events.h" 2
# 419 "./include/trace/trace_events.h"
# 1 "./include/trace/events/fib6.h" 1
# 11 "./include/trace/events/fib6.h"
# 1 "./include/linux/tracepoint.h" 1
# 12 "./include/trace/events/fib6.h" 2

static __attribute__((patchable_function_entry(0, 0))) void trace_event_raw_event_fib6_table_lookup(void *__data, const struct net *net, const struct fib6_result *res, struct fib6_table *table, const struct flowi6 *flp) { struct trace_event_file *trace_file = __data; struct trace_event_data_offsets_fib6_table_lookup __attribute__((__unused__)) __data_offsets; struct trace_event_buffer fbuffer; struct trace_event_raw_fib6_table_lookup *entry; int __data_size; if (trace_trigger_soft_disabled(trace_file)) return; __data_size = trace_event_get_offsets_fib6_table_lookup(&__data_offsets, net, res, table, flp); entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry) + __data_size); if (!entry) return; entry->__data_loc_name = __data_offsets.name; { struct in6_addr *in6; entry->tb_id = table->tb6_id; entry->err = ip6_rt_type_to_error(res->fib6_type); entry->oif = flp->__fl_common.flowic_oif; entry->iif = flp->__fl_common.flowic_iif; entry->tos = ip6_tclass(flp->flowlabel); entry->scope = flp->__fl_common.flowic_scope; entry->flags = flp->__fl_common.flowic_flags; in6 = (struct in6_addr *)entry->src; *in6 = flp->saddr; in6 = (struct in6_addr *)entry->dst; *in6 = flp->daddr; entry->proto = flp->__fl_common.flowic_proto; if (entry->proto == IPPROTO_TCP || entry->proto == IPPROTO_UDP) { entry->sport = (__builtin_constant_p((__u16)(( __u16)(__be16)(flp->uli.ports.sport))) ? ((__u16)( (((__u16)(( __u16)(__be16)(flp->uli.ports.sport)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(flp->uli.ports.sport)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(flp->uli.ports.sport))); entry->dport = (__builtin_constant_p((__u16)(( __u16)(__be16)(flp->uli.ports.dport))) ? ((__u16)( (((__u16)(( __u16)(__be16)(flp->uli.ports.dport)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(flp->uli.ports.dport)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(flp->uli.ports.dport))); } else { entry->sport = 0; entry->dport = 0; } if (res->nh && res->nh->nh_common.nhc_dev) { strcpy(((char *)((void *)entry + (entry->__data_loc_name & 0xffff))), (res->nh->nh_common.nhc_dev) ? (const char *)(res->nh->nh_common.nhc_dev) : "(null)");; } else { strcpy(((char *)((void *)entry + (entry->__data_loc_name & 0xffff))), ("-") ? (const char *)("-") : "(null)");; } if (res->f6i == net->ipv6.fib6_null_entry) { struct in6_addr in6_zero = {}; in6 = (struct in6_addr *)entry->gw; *in6 = in6_zero; } else if (res->nh) { in6 = (struct in6_addr *)entry->gw; *in6 = res->nh->nh_common.nhc_gw.ipv6; }; } trace_event_buffer_commit(&fbuffer); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void ftrace_test_probe_fib6_table_lookup(void) { check_trace_callback_type_fib6_table_lookup(trace_event_raw_event_fib6_table_lookup); };;
# 92 "./include/trace/events/fib6.h"
# 1 "./include/trace/define_trace.h" 1
# 93 "./include/trace/events/fib6.h" 2
# 420 "./include/trace/trace_events.h" 2

# 1 "./include/trace/stages/stage7_class_define.h" 1
# 422 "./include/trace/trace_events.h" 2
# 469 "./include/trace/trace_events.h"
# 1 "./include/trace/events/fib6.h" 1
# 11 "./include/trace/events/fib6.h"
# 1 "./include/linux/tracepoint.h" 1
# 12 "./include/trace/events/fib6.h" 2

static __attribute__((patchable_function_entry(0, 0))) void perf_trace_fib6_table_lookup(void *__data, const struct net *net, const struct fib6_result *res, struct fib6_table *table, const struct flowi6 *flp);; static char print_fmt_fib6_table_lookup[] = "\"" "table %3u oif %d iif %d proto %u %pI6c/%u -> %pI6c/%u tos %d scope %d flags %x ==> dev %s gw %pI6c err %d" "\", " "REC->tb_id, REC->oif, REC->iif, REC->proto, REC->src, REC->sport, REC->dst, REC->dport, REC->tos, REC->scope, REC->flags, __get_str(name), REC->gw, REC->err"; static struct trace_event_class __attribute__((__used__)) __attribute__((__section__(".ref.data"))) event_class_fib6_table_lookup = { .system = str__fib6__trace_system_name, .fields_array = trace_event_fields_fib6_table_lookup, .fields = { &(event_class_fib6_table_lookup.fields), &(event_class_fib6_table_lookup.fields) }, .raw_init = trace_event_raw_init, .probe = trace_event_raw_event_fib6_table_lookup, .reg = trace_event_reg, .perf_probe = perf_trace_fib6_table_lookup, };; static struct trace_event_call __attribute__((__used__)) event_fib6_table_lookup = { .class = &event_class_fib6_table_lookup, { .tp = &__tracepoint_fib6_table_lookup, }, .event.funcs = &trace_event_type_funcs_fib6_table_lookup, .print_fmt = print_fmt_fib6_table_lookup, .flags = TRACE_EVENT_FL_TRACEPOINT, }; static struct trace_event_call __attribute__((__used__)) __attribute__((__section__("_ftrace_events"))) *__event_fib6_table_lookup = &event_fib6_table_lookup;;
# 92 "./include/trace/events/fib6.h"
# 1 "./include/trace/define_trace.h" 1
# 93 "./include/trace/events/fib6.h" 2
# 470 "./include/trace/trace_events.h" 2
# 103 "./include/trace/define_trace.h" 2
# 1 "./include/trace/perf.h" 1
# 113 "./include/trace/perf.h"
# 1 "./include/trace/events/fib6.h" 1
# 11 "./include/trace/events/fib6.h"
# 1 "./include/linux/tracepoint.h" 1
# 12 "./include/trace/events/fib6.h" 2

static __attribute__((patchable_function_entry(0, 0))) void perf_trace_fib6_table_lookup(void *__data, const struct net *net, const struct fib6_result *res, struct fib6_table *table, const struct flowi6 *flp) { struct trace_event_call *event_call = __data; struct trace_event_data_offsets_fib6_table_lookup __attribute__((__unused__)) __data_offsets; struct trace_event_raw_fib6_table_lookup *entry; struct pt_regs *__regs; u64 __count = 1; struct task_struct *__task = ((void *)0); struct hlist_head *head; int __entry_size; int __data_size; int rctx; __data_size = trace_event_get_offsets_fib6_table_lookup(&__data_offsets, net, res, table, flp); head = ({ do { const void *__vpp_verify = (typeof((event_call->perf_events) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(event_call->perf_events)) *)(event_call->perf_events)); (typeof((typeof(*(event_call->perf_events)) *)(event_call->perf_events))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }); if (!bpf_prog_array_valid(event_call) && __builtin_constant_p(!__task) && !__task && hlist_empty(head)) return; __entry_size = ((((__data_size + sizeof(*entry) + sizeof(u32))) + ((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)) & ~((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)); __entry_size -= sizeof(u32); entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); if (!entry) return; perf_fetch_caller_regs(__regs); entry->__data_loc_name = __data_offsets.name; { struct in6_addr *in6; entry->tb_id = table->tb6_id; entry->err = ip6_rt_type_to_error(res->fib6_type); entry->oif = flp->__fl_common.flowic_oif; entry->iif = flp->__fl_common.flowic_iif; entry->tos = ip6_tclass(flp->flowlabel); entry->scope = flp->__fl_common.flowic_scope; entry->flags = flp->__fl_common.flowic_flags; in6 = (struct in6_addr *)entry->src; *in6 = flp->saddr; in6 = (struct in6_addr *)entry->dst; *in6 = flp->daddr; entry->proto = flp->__fl_common.flowic_proto; if (entry->proto == IPPROTO_TCP || entry->proto == IPPROTO_UDP) { entry->sport = (__builtin_constant_p((__u16)(( __u16)(__be16)(flp->uli.ports.sport))) ? ((__u16)( (((__u16)(( __u16)(__be16)(flp->uli.ports.sport)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(flp->uli.ports.sport)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(flp->uli.ports.sport))); entry->dport = (__builtin_constant_p((__u16)(( __u16)(__be16)(flp->uli.ports.dport))) ? ((__u16)( (((__u16)(( __u16)(__be16)(flp->uli.ports.dport)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(flp->uli.ports.dport)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(flp->uli.ports.dport))); } else { entry->sport = 0; entry->dport = 0; } if (res->nh && res->nh->nh_common.nhc_dev) { strcpy(((char *)((void *)entry + (entry->__data_loc_name & 0xffff))), (res->nh->nh_common.nhc_dev) ? (const char *)(res->nh->nh_common.nhc_dev) : "(null)");; } else { strcpy(((char *)((void *)entry + (entry->__data_loc_name & 0xffff))), ("-") ? 
(const char *)("-") : "(null)");; } if (res->f6i == net->ipv6.fib6_null_entry) { struct in6_addr in6_zero = {}; in6 = (struct in6_addr *)entry->gw; *in6 = in6_zero; } else if (res->nh) { in6 = (struct in6_addr *)entry->gw; *in6 = res->nh->nh_common.nhc_gw.ipv6; }; } perf_trace_run_bpf_submit(entry, __entry_size, rctx, event_call, __count, __regs, head, __task); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void perf_test_probe_fib6_table_lookup(void) { check_trace_callback_type_fib6_table_lookup(perf_trace_fib6_table_lookup); };;
# 92 "./include/trace/events/fib6.h"
# 1 "./include/trace/define_trace.h" 1
# 93 "./include/trace/events/fib6.h" 2
# 114 "./include/trace/perf.h" 2
# 104 "./include/trace/define_trace.h" 2
# 1 "./include/trace/bpf_probe.h" 1
# 153 "./include/trace/bpf_probe.h"
# 1 "./include/trace/events/fib6.h" 1
# 11 "./include/trace/events/fib6.h"
# 1 "./include/linux/tracepoint.h" 1
# 12 "./include/trace/events/fib6.h" 2

static __attribute__((patchable_function_entry(0, 0))) void __bpf_trace_fib6_table_lookup(void *__data, const struct net *net, const struct fib6_result *res, struct fib6_table *table, const struct flowi6 *flp) { struct bpf_prog *prog = __data; bpf_trace_run4(prog, ({ typeof(net) __src = (net); __typeof__(__builtin_choose_expr(sizeof(net) == 1, (u8)1, __builtin_choose_expr(sizeof(net) == 2, (u16)2, __builtin_choose_expr(sizeof(net) == 4, (u32)3, __builtin_choose_expr(sizeof(net) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(res) __src = (res); __typeof__(__builtin_choose_expr(sizeof(res) == 1, (u8)1, __builtin_choose_expr(sizeof(res) == 2, (u16)2, __builtin_choose_expr(sizeof(res) == 4, (u32)3, __builtin_choose_expr(sizeof(res) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(table) __src = (table); __typeof__(__builtin_choose_expr(sizeof(table) == 1, (u8)1, __builtin_choose_expr(sizeof(table) == 2, (u16)2, __builtin_choose_expr(sizeof(table) == 4, (u32)3, __builtin_choose_expr(sizeof(table) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(flp) __src = (flp); __typeof__(__builtin_choose_expr(sizeof(flp) == 1, (u8)1, __builtin_choose_expr(sizeof(flp) == 2, (u16)2, __builtin_choose_expr(sizeof(flp) == 4, (u32)3, __builtin_choose_expr(sizeof(flp) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; })); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void bpf_test_probe_fib6_table_lookup(void) { check_trace_callback_type_fib6_table_lookup(__bpf_trace_fib6_table_lookup); } typedef void (*btf_trace_fib6_table_lookup)(void *__data, const struct net *net, const struct fib6_result *res, struct fib6_table *table, const struct flowi6 *flp); static union { struct bpf_raw_event_map event; btf_trace_fib6_table_lookup handler; } __bpf_trace_tp_map_fib6_table_lookup __attribute__((__used__)) __attribute__((__section__("__bpf_raw_tp_map"))) = { .event = { .tp = &__tracepoint_fib6_table_lookup, .bpf_func = __bpf_trace_fib6_table_lookup, .num_args = 4, .writable_size = 0, }, };;;
# 92 "./include/trace/events/fib6.h"
# 1 "./include/trace/define_trace.h" 1
# 93 "./include/trace/events/fib6.h" 2
# 154 "./include/trace/bpf_probe.h" 2
# 105 "./include/trace/define_trace.h" 2
# 93 "./include/trace/events/fib6.h" 2
# 75 "net/ipv6/route.c" 2
extern typeof(__tracepoint_fib6_table_lookup) __tracepoint_fib6_table_lookup; extern const char __kstrtab___tracepoint_fib6_table_lookup[]; extern const char __kstrtabns___tracepoint_fib6_table_lookup[]; ; asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1 \n" "__kstrtab_" "__tracepoint_fib6_table_lookup" ": \n" " .asciz \"" "__tracepoint_fib6_table_lookup" "\" \n" "__kstrtabns_" "__tracepoint_fib6_table_lookup" ": \n" " .asciz \"" "" "\" \n" " .previous \n"); static const struct kernel_symbol __ksymtab___tracepoint_fib6_table_lookup __attribute__((section("___ksymtab" "_gpl" "+" "__tracepoint_fib6_table_lookup"), used)) __attribute__((__aligned__(sizeof(void *)))) = { (unsigned long)&__tracepoint_fib6_table_lookup, __kstrtab___tracepoint_fib6_table_lookup, __kstrtabns___tracepoint_fib6_table_lookup }; extern typeof(__traceiter_fib6_table_lookup) __traceiter_fib6_table_lookup; extern const char __kstrtab___traceiter_fib6_table_lookup[]; extern const char __kstrtabns___traceiter_fib6_table_lookup[]; ; asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1 \n" "__kstrtab_" "__traceiter_fib6_table_lookup" ": \n" " .asciz \"" "__traceiter_fib6_table_lookup" "\" \n" "__kstrtabns_" "__traceiter_fib6_table_lookup" ": \n" " .asciz \"" "" "\" \n" " .previous \n"); static const struct kernel_symbol __ksymtab___traceiter_fib6_table_lookup __attribute__((section("___ksymtab" "_gpl" "+" "__traceiter_fib6_table_lookup"), used)) __attribute__((__aligned__(sizeof(void *)))) = { (unsigned long)&__traceiter_fib6_table_lookup, __kstrtab___traceiter_fib6_table_lookup, __kstrtabns___traceiter_fib6_table_lookup }; extern typeof(__SCK__tp_func_fib6_table_lookup) __SCK__tp_func_fib6_table_lookup; extern const char __kstrtab___SCK__tp_func_fib6_table_lookup[]; extern const char __kstrtabns___SCK__tp_func_fib6_table_lookup[]; ; asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1 \n" "__kstrtab_" "__SCK__tp_func_fib6_table_lookup" ": \n" " .asciz \"" "__SCK__tp_func_fib6_table_lookup" "\" \n" "__kstrtabns_" "__SCK__tp_func_fib6_table_lookup" ": \n" " .asciz \"" "" "\" \n" " .previous \n"); static const struct kernel_symbol __ksymtab___SCK__tp_func_fib6_table_lookup __attribute__((section("___ksymtab" "_gpl" "+" "__SCK__tp_func_fib6_table_lookup"), used)) __attribute__((__aligned__(sizeof(void *)))) = { (unsigned long)&__SCK__tp_func_fib6_table_lookup, __kstrtab___SCK__tp_func_fib6_table_lookup, __kstrtabns___SCK__tp_func_fib6_table_lookup };


enum rt6_nud_state {
RT6_NUD_FAIL_HARD = -3,
RT6_NUD_FAIL_PROBE = -2,
RT6_NUD_FAIL_DO_RR = -1,
RT6_NUD_SUCCEED = 1
};

static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
static unsigned int ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
struct net_device *dev, int how);
static int ip6_dst_gc(struct dst_ops *ops);

static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu,
bool confirm_neigh);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
int strict);
static size_t rt6_nlmsg_size(struct fib6_info *f6i);
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
struct fib6_info *rt, struct dst_entry *dst,
struct in6_addr *dest, struct in6_addr *src,
int iif, int type, u32 portid, u32 seq,
unsigned int flags);
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
const struct in6_addr *daddr,
const struct in6_addr *saddr);
# 130 "net/ipv6/route.c"
struct uncached_list {
spinlock_t lock;
struct list_head head;
struct list_head quarantine;
};

static __attribute__((section(".data..percpu" "..shared_aligned"))) __typeof__(struct uncached_list) rt6_uncached_list __attribute__((__aligned__((1 << 6))));

void rt6_uncached_list_add(struct rt6_info *rt)
{
struct uncached_list *ul = ({ do { const void *__vpp_verify = (typeof((&rt6_uncached_list) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&rt6_uncached_list)) *)(&rt6_uncached_list)); (typeof((typeof(*(&rt6_uncached_list)) *)(&rt6_uncached_list))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); });

rt->rt6i_uncached_list = ul;

spin_lock_bh(&ul->lock);
list_add_tail(&rt->rt6i_uncached, &ul->head);
spin_unlock_bh(&ul->lock);
}

void rt6_uncached_list_del(struct rt6_info *rt)
{
if (!list_empty(&rt->rt6i_uncached)) {
struct uncached_list *ul = rt->rt6i_uncached_list;

spin_lock_bh(&ul->lock);
list_del_init(&rt->rt6i_uncached);
spin_unlock_bh(&ul->lock);
}
}

static void rt6_uncached_list_flush_dev(struct net_device *dev)
{
int cpu;

for (((cpu)) = -1; ((cpu)) = cpumask_next(((cpu)), (((const struct cpumask *)&__cpu_possible_mask))), ((cpu)) < nr_cpu_ids;) {
struct uncached_list *ul = ({ do { const void *__vpp_verify = (typeof((&rt6_uncached_list) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&rt6_uncached_list))) *)((&rt6_uncached_list))); (typeof((typeof(*((&rt6_uncached_list))) *)((&rt6_uncached_list)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); });
struct rt6_info *rt, *safe;

if (list_empty(&ul->head))
continue;

spin_lock_bh(&ul->lock);
for (rt = ({ void *__mptr = (void *)((&ul->head)->next); _Static_assert(__builtin_types_compatible_p(typeof(*((&ul->head)->next)), typeof(((typeof(*rt) *)0)->rt6i_uncached)) || __builtin_types_compatible_p(typeof(*((&ul->head)->next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*rt) *)(__mptr - __builtin_offsetof(typeof(*rt), rt6i_uncached))); }), safe = ({ void *__mptr = (void *)((rt)->rt6i_uncached.next); _Static_assert(__builtin_types_compatible_p(typeof(*((rt)->rt6i_uncached.next)), typeof(((typeof(*(rt)) *)0)->rt6i_uncached)) || __builtin_types_compatible_p(typeof(*((rt)->rt6i_uncached.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(rt)) *)(__mptr - __builtin_offsetof(typeof(*(rt)), rt6i_uncached))); }); !(&rt->rt6i_uncached == (&ul->head)); rt = safe, safe = ({ void *__mptr = (void *)((safe)->rt6i_uncached.next); _Static_assert(__builtin_types_compatible_p(typeof(*((safe)->rt6i_uncached.next)), typeof(((typeof(*(safe)) *)0)->rt6i_uncached)) || __builtin_types_compatible_p(typeof(*((safe)->rt6i_uncached.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(safe)) *)(__mptr - __builtin_offsetof(typeof(*(safe)), rt6i_uncached))); })) {
struct inet6_dev *rt_idev = rt->rt6i_idev;
struct net_device *rt_dev = rt->dst.dev;
bool handled = false;

if (rt_idev->dev == dev) {
rt->rt6i_idev = in6_dev_get(blackhole_netdev);
in6_dev_put(rt_idev);
handled = true;
}

if (rt_dev == dev) {
rt->dst.dev = blackhole_netdev;
dev_replace_track(rt_dev, blackhole_netdev,
&rt->dst.dev_tracker,
((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)));
handled = true;
}
if (handled)
list_move(&rt->rt6i_uncached,
&ul->quarantine);
}
spin_unlock_bh(&ul->lock);
}
}

static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) const void *choose_neigh_daddr(const struct in6_addr *p,
struct sk_buff *skb,
const void *daddr)
{
if (!ipv6_addr_any(p))
return (const void *) p;
else if (skb)
return &ipv6_hdr(skb)->daddr;
return daddr;
}

struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
struct net_device *dev,
struct sk_buff *skb,
const void *daddr)
{
struct neighbour *n;

daddr = choose_neigh_daddr(gw, skb, daddr);
n = __ipv6_neigh_lookup(dev, daddr);
if (n)
return n;

n = neigh_create(&nd_tbl, daddr, dev);
return IS_ERR(n) ? ((void *)0) : n;
}

static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
struct sk_buff *skb,
const void *daddr)
{
const struct rt6_info *rt = ({ void *__mptr = (void *)(dst); _Static_assert(__builtin_types_compatible_p(typeof(*(dst)), typeof(((struct rt6_info *)0)->dst)) || __builtin_types_compatible_p(typeof(*(dst)), typeof(void)), "pointer type mismatch in container_of()"); ((struct rt6_info *)(__mptr - __builtin_offsetof(struct rt6_info, dst))); });

return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
dst->dev, skb, daddr);
}

static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
struct net_device *dev = dst->dev;
struct rt6_info *rt = (struct rt6_info *)dst;

daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), ((void *)0), daddr);
if (!daddr)
return;
if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
return;
if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
return;
__ipv6_confirm_neigh(dev, daddr);
}

static struct dst_ops ip6_dst_ops_template = {
.family = 10,
.gc = ip6_dst_gc,
.gc_thresh = 1024,
.check = ip6_dst_check,
.default_advmss = ip6_default_advmss,
.mtu = ip6_mtu,
.cow_metrics = dst_cow_metrics_generic,
.destroy = ip6_dst_destroy,
.ifdown = ip6_dst_ifdown,
.negative_advice = ip6_negative_advice,
.link_failure = ip6_link_failure,
.update_pmtu = ip6_rt_update_pmtu,
.redirect = rt6_do_redirect,
.local_out = __ip6_local_out,
.neigh_lookup = ip6_dst_neigh_lookup,
.confirm_neigh = ip6_confirm_neigh,
};

static struct dst_ops ip6_dst_blackhole_ops = {
.family = 10,
.default_advmss = ip6_default_advmss,
.neigh_lookup = ip6_dst_neigh_lookup,
.check = ip6_dst_check,
.destroy = ip6_dst_destroy,
.cow_metrics = dst_cow_metrics_generic,
.update_pmtu = dst_blackhole_update_pmtu,
.redirect = dst_blackhole_redirect,
.mtu = dst_blackhole_mtu,
};

static const u32 ip6_template_metrics[(__RTAX_MAX - 1)] = {
[RTAX_HOPLIMIT - 1] = 0,
};

static const struct fib6_info fib6_null_entry_template = {
.fib6_flags = (0x0200 | 0x00200000),
.fib6_protocol = 2,
.fib6_metric = ~(u32)0,
.fib6_ref = { .refs = { (1) }, },
.fib6_type = RTN_UNREACHABLE,
.fib6_metrics = (struct dst_metrics *)&dst_default_metrics,
};

static const struct rt6_info ip6_null_entry_template = {
.dst = {
.__refcnt = { (1) },
.__use = 1,
.obsolete = -1,
.error = -101,
.input = ip6_pkt_discard,
.output = ip6_pkt_discard_out,
},
.rt6i_flags = (0x0200 | 0x00200000),
};
# 334 "net/ipv6/route.c"
static void rt6_info_init(struct rt6_info *rt)
{
({ u8 *__ptr = (u8 *)(rt); typeof(0) __val = (0); memset(__ptr + (__builtin_offsetof(typeof(*(rt)), dst) + sizeof((((typeof(*(rt)) *)0)->dst))), __val, sizeof(*(rt)) - (__builtin_offsetof(typeof(*(rt)), dst) + sizeof((((typeof(*(rt)) *)0)->dst)))); });
INIT_LIST_HEAD(&rt->rt6i_uncached);
}


struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
int flags)
{
struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
1, -1, flags);

if (rt) {
rt6_info_init(rt);
atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
}

return rt;
}
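/* Expanded EXPORT_SYMBOL(ip6_dst_alloc); note the empty (non-"_gpl") section suffix. */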
extern typeof(ip6_dst_alloc) ip6_dst_alloc; extern const char __kstrtab_ip6_dst_alloc[]; extern const char __kstrtabns_ip6_dst_alloc[]; ; asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1 \n" "__kstrtab_" "ip6_dst_alloc" ": \n" " .asciz \"" "ip6_dst_alloc" "\" \n" "__kstrtabns_" "ip6_dst_alloc" ": \n" " .asciz \"" "" "\" \n" " .previous \n"); static const struct kernel_symbol __ksymtab_ip6_dst_alloc __attribute__((section("___ksymtab" "" "+" "ip6_dst_alloc"), used)) __attribute__((__aligned__(sizeof(void *)))) = { (unsigned long)&ip6_dst_alloc, __kstrtab_ip6_dst_alloc, __kstrtabns_ip6_dst_alloc };

static void ip6_dst_destroy(struct dst_entry *dst)
{
struct rt6_info *rt = (struct rt6_info *)dst;
struct fib6_info *from;
struct inet6_dev *idev;

ip_dst_metrics_put(dst);
rt6_uncached_list_del(rt);

idev = rt->rt6i_idev;
if (idev) {
rt->rt6i_idev = ((void *)0);
in6_dev_put(idev);
}
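/*
 * The statement expression below is expanded xchg(&rt->from, ((void *)0)):
 * on riscv it boils down to a single amoswap.d.aqrl, atomically
 * detaching the fib6_info from the route so it is released exactly
 * once.
 */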

from = ({ typeof(( struct fib6_info **)&rt->from) __ai_ptr = (( struct fib6_info **)&rt->from); do { } while (0); instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); ({ __typeof__(*(__ai_ptr)) _x_ = (((void *)0)); (__typeof__(*(__ai_ptr))) ({ __typeof__((__ai_ptr)) __ptr = ((__ai_ptr)); __typeof__(_x_) __new = (_x_); __typeof__(*((__ai_ptr))) __ret; switch (sizeof(*(__ai_ptr))) { case 4: __asm__ __volatile__ ( " amoswap.w.aqrl %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; case 8: __asm__ __volatile__ ( " amoswap.d.aqrl %0, %2, %1\n" : "=r" (__ret), "+A" (*__ptr) : "r" (__new) : "memory"); break; default: do { __attribute__((__noreturn__)) extern void __compiletime_assert_644(void) ; if (!(!(1))) __compiletime_assert_644(); } while (0); } __ret; }); }); });
fib6_info_release(from);
}

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
int how)
{
struct rt6_info *rt = (struct rt6_info *)dst;
struct inet6_dev *idev = rt->rt6i_idev;

if (idev && idev->dev != blackhole_netdev) {
struct inet6_dev *blackhole_idev = in6_dev_get(blackhole_netdev);

if (blackhole_idev) {
rt->rt6i_idev = blackhole_idev;
in6_dev_put(idev);
}
}
}

static bool __rt6_check_expired(const struct rt6_info *rt)
{
if (rt->rt6i_flags & 0x00400000)
return (({ unsigned long __dummy; typeof(jiffies) __dummy2; (void)(&__dummy == &__dummy2); 1; }) && ({ unsigned long __dummy; typeof(rt->dst.expires) __dummy2; (void)(&__dummy == &__dummy2); 1; }) && ((long)((rt->dst.expires) - (jiffies)) < 0));
else
return false;
}

static bool rt6_check_expired(const struct rt6_info *rt)
{
struct fib6_info *from;
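/*
 * The READ_ONCE()-plus-__UNIQUE_ID_rcu blocks here and in the
 * lookup helpers below are expanded rcu_dereference(): a volatile
 * load of the pointer with the lockdep check compiled out.
 */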

from = ({ typeof(*(rt->from)) *__UNIQUE_ID_rcu645 = (typeof(*(rt->from)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_646(void) ; if (!((sizeof((rt->from)) == sizeof(char) || sizeof((rt->from)) == sizeof(short) || sizeof((rt->from)) == sizeof(int) || sizeof((rt->from)) == sizeof(long)) || sizeof((rt->from)) == sizeof(long long))) __compiletime_assert_646(); } while (0); (*(const volatile typeof( _Generic(((rt->from)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((rt->from)))) *)&((rt->from))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(rt->from)) *)(__UNIQUE_ID_rcu645)); });

if (rt->rt6i_flags & 0x00400000) {
if ((({ unsigned long __dummy; typeof(jiffies) __dummy2; (void)(&__dummy == &__dummy2); 1; }) && ({ unsigned long __dummy; typeof(rt->dst.expires) __dummy2; (void)(&__dummy == &__dummy2); 1; }) && ((long)((rt->dst.expires) - (jiffies)) < 0)))
return true;
} else if (from) {
return rt->dst.obsolete != -1 ||
fib6_check_expired(from);
}
return false;
}
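/*
 * fib6_select_path() below implements multipath selection:
 * fl6->mp_hash is compared against each sibling nexthop's
 * atomically read upper bound, and the first sibling whose bound
 * covers the hash and whose route still scores acceptably wins.
 * The container_of() chain in its loop is an expanded
 * list_for_each_entry_safe() over match->fib6_siblings.
 */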

void fib6_select_path(const struct net *net, struct fib6_result *res,
struct flowi6 *fl6, int oif, bool have_oif_match,
const struct sk_buff *skb, int strict)
{
struct fib6_info *sibling, *next_sibling;
struct fib6_info *match = res->f6i;

if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
goto out;

if (match->nh && have_oif_match && res->nh)
return;




if (!fl6->mp_hash &&
(!match->nh || nexthop_is_multipath(match->nh)))
fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, ((void *)0));

if (__builtin_expect(!!(match->nh), 0)) {
nexthop_path_fib6_result(res, fl6->mp_hash);
return;
}

if (fl6->mp_hash <= atomic_read(&match->fib6_nh->nh_common.nhc_upper_bound))
goto out;

for (sibling = ({ void *__mptr = (void *)((&match->fib6_siblings)->next); _Static_assert(__builtin_types_compatible_p(typeof(*((&match->fib6_siblings)->next)), typeof(((typeof(*sibling) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((&match->fib6_siblings)->next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*sibling) *)(__mptr - __builtin_offsetof(typeof(*sibling), fib6_siblings))); }), next_sibling = ({ void *__mptr = (void *)((sibling)->fib6_siblings.next); _Static_assert(__builtin_types_compatible_p(typeof(*((sibling)->fib6_siblings.next)), typeof(((typeof(*(sibling)) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((sibling)->fib6_siblings.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(sibling)) *)(__mptr - __builtin_offsetof(typeof(*(sibling)), fib6_siblings))); }); !(&sibling->fib6_siblings == (&match->fib6_siblings)); sibling = next_sibling, next_sibling = ({ void *__mptr = (void *)((next_sibling)->fib6_siblings.next); _Static_assert(__builtin_types_compatible_p(typeof(*((next_sibling)->fib6_siblings.next)), typeof(((typeof(*(next_sibling)) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((next_sibling)->fib6_siblings.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(next_sibling)) *)(__mptr - __builtin_offsetof(typeof(*(next_sibling)), fib6_siblings))); })) {

const struct fib6_nh *nh = sibling->fib6_nh;
int nh_upper_bound;

nh_upper_bound = atomic_read(&nh->nh_common.nhc_upper_bound);
if (fl6->mp_hash > nh_upper_bound)
continue;
if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
break;
match = sibling;
break;
}

out:
res->f6i = match;
res->nh = match->fib6_nh;
}





static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
const struct in6_addr *saddr, int oif, int flags)
{
const struct net_device *dev;

if (nh->nh_common.nhc_flags & 1)
return false;

dev = nh->nh_common.nhc_dev;
if (oif) {
if (dev->ifindex == oif)
return true;
} else {
if (ipv6_chk_addr(net, saddr, dev,
flags & 0x00000001))
return true;
}

return false;
}

struct fib6_nh_dm_arg {
struct net *net;
const struct in6_addr *saddr;
int oif;
int flags;
struct fib6_nh *nh;
};

static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg)
{
struct fib6_nh_dm_arg *arg = _arg;

arg->nh = nh;
return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif,
arg->flags);
}


static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh,
struct fib6_result *res,
const struct in6_addr *saddr,
int oif, int flags)
{
struct fib6_nh_dm_arg arg = {
.net = net,
.saddr = saddr,
.oif = oif,
.flags = flags,
};

if (nexthop_is_blackhole(nh))
return ((void *)0);

if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg))
return arg.nh;

return ((void *)0);
}

static void rt6_device_match(struct net *net, struct fib6_result *res,
const struct in6_addr *saddr, int oif, int flags)
{
struct fib6_info *f6i = res->f6i;
struct fib6_info *spf6i;
struct fib6_nh *nh;

if (!oif && ipv6_addr_any(saddr)) {
if (__builtin_expect(!!(f6i->nh), 0)) {
nh = nexthop_fib6_nh(f6i->nh);
if (nexthop_is_blackhole(f6i->nh))
goto out_blackhole;
} else {
nh = f6i->fib6_nh;
}
if (!(nh->nh_common.nhc_flags & 1))
goto out;
}

for (spf6i = f6i; spf6i; spf6i = ({ typeof(*(spf6i->fib6_next)) *__UNIQUE_ID_rcu647 = (typeof(*(spf6i->fib6_next)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_648(void) ; if (!((sizeof((spf6i->fib6_next)) == sizeof(char) || sizeof((spf6i->fib6_next)) == sizeof(short) || sizeof((spf6i->fib6_next)) == sizeof(int) || sizeof((spf6i->fib6_next)) == sizeof(long)) || sizeof((spf6i->fib6_next)) == sizeof(long long))) __compiletime_assert_648(); } while (0); (*(const volatile typeof( _Generic(((spf6i->fib6_next)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((spf6i->fib6_next)))) *)&((spf6i->fib6_next))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(spf6i->fib6_next)) *)(__UNIQUE_ID_rcu647)); })) {
bool matched = false;

if (__builtin_expect(!!(spf6i->nh), 0)) {
nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr,
oif, flags);
if (nh)
matched = true;
} else {
nh = spf6i->fib6_nh;
if (__rt6_device_match(net, nh, saddr, oif, flags))
matched = true;
}
if (matched) {
res->f6i = spf6i;
goto out;
}
}

if (oif && flags & 0x00000001) {
res->f6i = net->ipv6.fib6_null_entry;
nh = res->f6i->fib6_nh;
goto out;
}

if (__builtin_expect(!!(f6i->nh), 0)) {
nh = nexthop_fib6_nh(f6i->nh);
if (nexthop_is_blackhole(f6i->nh))
goto out_blackhole;
} else {
nh = f6i->fib6_nh;
}

if (nh->nh_common.nhc_flags & 1) {
res->f6i = net->ipv6.fib6_null_entry;
nh = res->f6i->fib6_nh;
}
out:
res->nh = nh;
res->fib6_type = res->f6i->fib6_type;
res->fib6_flags = res->f6i->fib6_flags;
return;

out_blackhole:
res->fib6_flags |= 0x0200;
res->fib6_type = RTN_BLACKHOLE;
res->nh = nh;
}
# 673 "net/ipv6/route.c"
static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((patchable_function_entry(0, 0))) void rt6_probe(struct fib6_nh *fib6_nh)
{
}





static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
{
enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
struct neighbour *neigh;

rcu_read_lock_bh();
neigh = __ipv6_neigh_lookup_noref(fib6_nh->nh_common.nhc_dev,
&fib6_nh->nh_common.nhc_gw.ipv6);
if (neigh) {
_raw_read_lock(&neigh->lock);
if (neigh->nud_state & (0x80|0x40|0x02|0x10|0x04|0x08))
ret = RT6_NUD_SUCCEED;






_raw_read_unlock(&neigh->lock);
} else {
ret = 0 ? RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
}
rcu_read_unlock_bh();

return ret;
}

static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
int strict)
{
int m = 0;

if (!oif || nh->nh_common.nhc_dev->ifindex == oif)
m = 2;

if (!m && (strict & 0x00000001))
return RT6_NUD_FAIL_HARD;



if ((strict & 0x00000002) &&
!(fib6_flags & 0x00200000) && nh->nh_common.nhc_gw_family) {
int n = rt6_check_neigh(nh);
if (n < 0)
return n;
}
return m;
}

static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
int oif, int strict, int *mpri, bool *do_rr)
{
bool match_do_rr = false;
bool rc = false;
int m;

if (nh->nh_common.nhc_flags & 1)
goto out;

if (ip6_ignore_linkdown(nh->nh_common.nhc_dev) &&
nh->nh_common.nhc_flags & 16 &&
!(strict & 0x00000040))
goto out;

m = rt6_score_route(nh, fib6_flags, oif, strict);
if (m == RT6_NUD_FAIL_DO_RR) {
match_do_rr = true;
m = 0;
} else if (m == RT6_NUD_FAIL_HARD) {
goto out;
}

if (strict & 0x00000002)
rt6_probe(nh);


if (m > *mpri) {
*do_rr = match_do_rr;
*mpri = m;
rc = true;
}
out:
return rc;
}

struct fib6_nh_frl_arg {
u32 flags;
int oif;
int strict;
int *mpri;
bool *do_rr;
struct fib6_nh *nh;
};

static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg)
{
struct fib6_nh_frl_arg *arg = _arg;

arg->nh = nh;
return find_match(nh, arg->flags, arg->oif, arg->strict,
arg->mpri, arg->do_rr);
}

static void __find_rr_leaf(struct fib6_info *f6i_start,
struct fib6_info *nomatch, u32 metric,
struct fib6_result *res, struct fib6_info **cont,
int oif, int strict, bool *do_rr, int *mpri)
{
struct fib6_info *f6i;

for (f6i = f6i_start;
f6i && f6i != nomatch;
f6i = ({ typeof(*(f6i->fib6_next)) *__UNIQUE_ID_rcu649 = (typeof(*(f6i->fib6_next)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_650(void) ; if (!((sizeof((f6i->fib6_next)) == sizeof(char) || sizeof((f6i->fib6_next)) == sizeof(short) || sizeof((f6i->fib6_next)) == sizeof(int) || sizeof((f6i->fib6_next)) == sizeof(long)) || sizeof((f6i->fib6_next)) == sizeof(long long))) __compiletime_assert_650(); } while (0); (*(const volatile typeof( _Generic(((f6i->fib6_next)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((f6i->fib6_next)))) *)&((f6i->fib6_next))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(f6i->fib6_next)) *)(__UNIQUE_ID_rcu649)); })) {
bool matched = false;
struct fib6_nh *nh;

if (cont && f6i->fib6_metric != metric) {
*cont = f6i;
return;
}

if (fib6_check_expired(f6i))
continue;

if (__builtin_expect(!!(f6i->nh), 0)) {
struct fib6_nh_frl_arg arg = {
.flags = f6i->fib6_flags,
.oif = oif,
.strict = strict,
.mpri = mpri,
.do_rr = do_rr
};

if (nexthop_is_blackhole(f6i->nh)) {
res->fib6_flags = 0x0200;
res->fib6_type = RTN_BLACKHOLE;
res->f6i = f6i;
res->nh = nexthop_fib6_nh(f6i->nh);
return;
}
if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match,
&arg)) {
matched = true;
nh = arg.nh;
}
} else {
nh = f6i->fib6_nh;
if (find_match(nh, f6i->fib6_flags, oif, strict,
mpri, do_rr))
matched = true;
}
if (matched) {
res->f6i = f6i;
res->nh = nh;
res->fib6_flags = f6i->fib6_flags;
res->fib6_type = f6i->fib6_type;
}
}
}

static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
struct fib6_info *rr_head, int oif, int strict,
bool *do_rr, struct fib6_result *res)
{
u32 metric = rr_head->fib6_metric;
struct fib6_info *cont = ((void *)0);
int mpri = -1;

__find_rr_leaf(rr_head, ((void *)0), metric, res, &cont,
oif, strict, do_rr, &mpri);

__find_rr_leaf(leaf, rr_head, metric, res, &cont,
oif, strict, do_rr, &mpri);

if (res->f6i || !cont)
return;

__find_rr_leaf(cont, ((void *)0), metric, res, ((void *)0),
oif, strict, do_rr, &mpri);
}

static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
struct fib6_result *res, int strict)
{
struct fib6_info *leaf = ({ typeof(*(fn->leaf)) *__UNIQUE_ID_rcu651 = (typeof(*(fn->leaf)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_652(void) ; if (!((sizeof((fn->leaf)) == sizeof(char) || sizeof((fn->leaf)) == sizeof(short) || sizeof((fn->leaf)) == sizeof(int) || sizeof((fn->leaf)) == sizeof(long)) || sizeof((fn->leaf)) == sizeof(long long))) __compiletime_assert_652(); } while (0); (*(const volatile typeof( _Generic(((fn->leaf)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((fn->leaf)))) *)&((fn->leaf))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(fn->leaf)) *)(__UNIQUE_ID_rcu651)); });
struct fib6_info *rt0;
bool do_rr = false;
int key_plen;


res->f6i = ((void *)0);

if (!leaf || leaf == net->ipv6.fib6_null_entry)
goto out;

rt0 = ({ typeof(*(fn->rr_ptr)) *__UNIQUE_ID_rcu653 = (typeof(*(fn->rr_ptr)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_654(void) ; if (!((sizeof((fn->rr_ptr)) == sizeof(char) || sizeof((fn->rr_ptr)) == sizeof(short) || sizeof((fn->rr_ptr)) == sizeof(int) || sizeof((fn->rr_ptr)) == sizeof(long)) || sizeof((fn->rr_ptr)) == sizeof(long long))) __compiletime_assert_654(); } while (0); (*(const volatile typeof( _Generic(((fn->rr_ptr)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((fn->rr_ptr)))) *)&((fn->rr_ptr))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(fn->rr_ptr)) *)(__UNIQUE_ID_rcu653)); });
if (!rt0)
rt0 = leaf;






key_plen = rt0->fib6_dst.plen;




if (fn->fn_bit != key_plen)
goto out;

find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
if (do_rr) {
struct fib6_info *next = ({ typeof(*(rt0->fib6_next)) *__UNIQUE_ID_rcu655 = (typeof(*(rt0->fib6_next)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_656(void) ; if (!((sizeof((rt0->fib6_next)) == sizeof(char) || sizeof((rt0->fib6_next)) == sizeof(short) || sizeof((rt0->fib6_next)) == sizeof(int) || sizeof((rt0->fib6_next)) == sizeof(long)) || sizeof((rt0->fib6_next)) == sizeof(long long))) __compiletime_assert_656(); } while (0); (*(const volatile typeof( _Generic(((rt0->fib6_next)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((rt0->fib6_next)))) *)&((rt0->fib6_next))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(rt0->fib6_next)) *)(__UNIQUE_ID_rcu655)); });


if (!next || next->fib6_metric != rt0->fib6_metric)
next = leaf;

if (next != rt0) {
spin_lock_bh(&leaf->fib6_table->tb6_lock);

if (next->fib6_node)
do { uintptr_t _r_a_p__v = (uintptr_t)(next); ; if (__builtin_constant_p(next) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_657(void) ; if (!((sizeof((fn->rr_ptr)) == sizeof(char) || sizeof((fn->rr_ptr)) == sizeof(short) || sizeof((fn->rr_ptr)) == sizeof(int) || sizeof((fn->rr_ptr)) == sizeof(long)) || sizeof((fn->rr_ptr)) == sizeof(long long))) __compiletime_assert_657(); } while (0); do { *(volatile typeof((fn->rr_ptr)) *)&((fn->rr_ptr)) = ((typeof(fn->rr_ptr))(_r_a_p__v)); } while (0); } while (0); else do { do { } while (0); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_658(void) ; if (!((sizeof(*&fn->rr_ptr) == sizeof(char) || sizeof(*&fn->rr_ptr) == sizeof(short) || sizeof(*&fn->rr_ptr) == sizeof(int) || sizeof(*&fn->rr_ptr) == sizeof(long)))) __compiletime_assert_658(); } while (0); __asm__ __volatile__ ("fence " "rw" "," "w" : : : "memory"); do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_659(void) ; if (!((sizeof(*&fn->rr_ptr) == sizeof(char) || sizeof(*&fn->rr_ptr) == sizeof(short) || sizeof(*&fn->rr_ptr) == sizeof(int) || sizeof(*&fn->rr_ptr) == sizeof(long)) || sizeof(*&fn->rr_ptr) == sizeof(long long))) __compiletime_assert_659(); } while (0); do { *(volatile typeof(*&fn->rr_ptr) *)&(*&fn->rr_ptr) = ((typeof(*((typeof(fn->rr_ptr))_r_a_p__v)) *)((typeof(fn->rr_ptr))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); } while (0);
spin_unlock_bh(&leaf->fib6_table->tb6_lock);
}
}

out:
if (!res->f6i) {
res->f6i = net->ipv6.fib6_null_entry;
res->nh = res->f6i->fib6_nh;
res->fib6_flags = res->f6i->fib6_flags;
res->fib6_type = res->f6i->fib6_type;
}
}

static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
{
return (res->f6i->fib6_flags & 0x00200000) ||
res->nh->nh_common.nhc_gw_family;
}
# 1007 "net/ipv6/route.c"
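/* Pick the device for the dst under rcu_read_lock(). The literals
 * 0x80000000 | 0x00100000 are RTF_LOCAL | RTF_ANYCAST in the
 * unpreprocessed source: such traffic must stay on the host, so an
 * L3 slave device is swapped for its VRF master, and anything that
 * is not an l3 master falls back to the per-netns loopback device.
 */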
static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
{
struct net_device *dev = res->nh->nh_common.nhc_dev;

if (res->fib6_flags & (0x80000000 | 0x00100000)) {




if (netif_is_l3_slave(dev) &&
!rt6_need_strict(&res->f6i->fib6_dst.addr))
dev = l3mdev_master_dev_rcu(dev);
else if (!netif_is_l3_master(dev))
dev = dev_net(dev)->loopback_dev;



}

return dev;
}

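/* Map each RTN_* route type to the errno a reject route reports back
 * to the sender; 0 marks the types that forward normally.
 */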
static const int fib6_prop[(__RTN_MAX - 1) + 1] = {
[RTN_UNSPEC] = 0,
[RTN_UNICAST] = 0,
[RTN_LOCAL] = 0,
[RTN_BROADCAST] = 0,
[RTN_ANYCAST] = 0,
[RTN_MULTICAST] = 0,
[RTN_BLACKHOLE] = -EINVAL,
[RTN_UNREACHABLE] = -EHOSTUNREACH,
[RTN_PROHIBIT] = -EACCES,
[RTN_THROW] = -EAGAIN,
[RTN_NAT] = -EINVAL,
[RTN_XRESOLVE] = -EINVAL,
};

static int ip6_rt_type_to_error(u8 fib6_type)
{
return fib6_prop[fib6_type];
}

static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
{
unsigned short flags = 0;

if (rt->dst_nocount)
flags |= 0x0008;
if (rt->dst_nopolicy)
flags |= 0x0004;

return flags;
}

static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
{
rt->dst.error = ip6_rt_type_to_error(fib6_type);

switch (fib6_type) {
case RTN_BLACKHOLE:
rt->dst.output = dst_discard_out;
rt->dst.input = dst_discard;
break;
case RTN_PROHIBIT:
rt->dst.output = ip6_pkt_prohibit_out;
rt->dst.input = ip6_pkt_prohibit;
break;
case RTN_THROW:
case RTN_UNREACHABLE:
default:
rt->dst.output = ip6_pkt_discard_out;
rt->dst.input = ip6_pkt_discard;
break;
}
}

static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
{
struct fib6_info *f6i = res->f6i;

if (res->fib6_flags & 0x0200) {
ip6_rt_init_dst_reject(rt, res->fib6_type);
return;
}

rt->dst.error = 0;
rt->dst.output = ip6_output;

if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
rt->dst.input = ip6_input;
} else if (ipv6_addr_type(&f6i->fib6_dst.addr) & 0x0002U) {
rt->dst.input = ip6_mc_input;
} else {
rt->dst.input = ip6_forward;
}

if (res->nh->nh_common.nhc_lwtstate) {
rt->dst.lwtstate = lwtstate_get(res->nh->nh_common.nhc_lwtstate);
lwtunnel_set_redirect(&rt->dst);
}

rt->dst.lastuse = jiffies;
}


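/* Publish the fib6_info backing this rt6_info. rcu_assign_pointer()
 * orders the initialisation above it before the pointer store (a
 * release fence, "fence rw,w" on riscv), so an RCU reader doing
 *
 *	rcu_read_lock();
 *	from = rcu_dereference(rt->from);
 *
 * sees a fully initialised entry or NULL, never a partial one.
 */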
static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
{
rt->rt6i_flags &= ~0x00400000;
rcu_assign_pointer(rt->from, from);
ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
}


static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
{
const struct fib6_nh *nh = res->nh;
const struct net_device *dev = nh->nh_common.nhc_dev;
struct fib6_info *f6i = res->f6i;

ip6_rt_init_dst(rt, res);

rt->rt6i_dst = f6i->fib6_dst;
rt->rt6i_idev = dev ? in6_dev_get(dev) : ((void *)0);
rt->rt6i_flags = res->fib6_flags;
if (nh->nh_common.nhc_gw_family) {
rt->rt6i_gateway = nh->nh_common.nhc_gw.ipv6;
rt->rt6i_flags |= 0x0002;
}
rt6_set_from(rt, f6i);



}

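/* Walk back up the tree until a node that carries route info is
 * found (fn_flags & 0x0004, RTN_RTINFO), giving up at the top-level
 * root (0x0001, RTN_TL_ROOT). sn would be the source-routed subtree,
 * but CONFIG_IPV6_SUBTREES is off in this build, so sn is constant
 * NULL and the fib6_node_lookup() branch below is dead code.
 */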
static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
struct in6_addr *saddr)
{
struct fib6_node *pn, *sn;
while (1) {
if (fn->fn_flags & 0x0001)
return ((void *)0);
pn = rcu_dereference(fn->parent);
sn = ((void *)0);
if (sn && sn != fn)
fn = fib6_node_lookup(sn, ((void *)0), saddr);
else
fn = pn;
if (fn->fn_flags & 0x0004)
return fn;
}
}

static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
{
struct rt6_info *rt = *prt;

if (dst_hold_safe(&rt->dst))
return true;
if (net) {
rt = net->ipv6.ip6_null_entry;
dst_hold(&rt->dst);
} else {
rt = ((void *)0);
}
*prt = rt;
return false;
}


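/* Build a one-off rt6_info for a result with no cached dst. Falls
 * back to the per-netns null entry (with a fresh hold) if the
 * fib6_info is already going away or the allocation fails.
 */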
static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
{
struct net_device *dev = res->nh->nh_common.nhc_dev;
struct fib6_info *f6i = res->f6i;
unsigned short flags;
struct rt6_info *nrt;

if (!fib6_info_hold_safe(f6i))
goto fallback;

flags = fib6_info_dst_flags(f6i);
nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
if (!nrt) {
fib6_info_release(f6i);
goto fallback;
}

ip6_rt_copy_init(nrt, res);
return nrt;

fallback:
nrt = dev_net(dev)->ipv6.ip6_null_entry;
dst_hold(&nrt->dst);
return nrt;
}

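/* Core RCU lookup: walk the table from the root, backtracking while
 * only the null entry matches. A hit either reuses a cached dst or
 * creates one; reject routes (fib6_flags & 0x0200, RTF_REJECT) skip
 * the cache. Every exit path passes through the tracepoint at out:.
 */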
static struct rt6_info *ip6_pol_route_lookup(struct net *net,
struct fib6_table *table,
struct flowi6 *fl6,
const struct sk_buff *skb,
int flags)
{
struct fib6_result res = {};
struct fib6_node *fn;
struct rt6_info *rt;

rcu_read_lock();
fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
res.f6i = rcu_dereference(fn->leaf);
if (!res.f6i)
res.f6i = net->ipv6.fib6_null_entry;
else
rt6_device_match(net, &res, &fl6->saddr, fl6->__fl_common.flowic_oif,
flags);

if (res.f6i == net->ipv6.fib6_null_entry) {
fn = fib6_backtrack(fn, &fl6->saddr);
if (fn)
goto restart;

rt = net->ipv6.ip6_null_entry;
dst_hold(&rt->dst);
goto out;
} else if (res.fib6_flags & 0x0200) {
goto do_create;
}

fib6_select_path(net, &res, fl6, fl6->__fl_common.flowic_oif,
fl6->__fl_common.flowic_oif != 0, skb, flags);


rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
if (rt) {
if (ip6_hold_safe(net, &rt))
dst_use_noref(&rt->dst, jiffies);
} else {
do_create:
rt = ip6_create_rt_rcu(&res);
}

out:
trace_fib6_table_lookup(net, &res, table, fl6);

rcu_read_unlock();

return rt;
}

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
const struct sk_buff *skb, int flags)
{
return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);

struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
const struct in6_addr *saddr, int oif,
const struct sk_buff *skb, int strict)
{
struct flowi6 fl6 = {
.__fl_common.flowic_oif = oif,
.daddr = *daddr,
};
struct dst_entry *dst;
int flags = strict ? 0x00000001 : 0;

if (saddr) {
memcpy(&fl6.saddr, saddr, sizeof(*saddr));
flags |= 0x00000004;
}

dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
if (dst->error == 0)
return (struct rt6_info *) dst;

dst_release(dst);

return ((void *)0);
}
EXPORT_SYMBOL(rt6_lookup);







static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
struct netlink_ext_ack *extack)
{
int err;
struct fib6_table *table;

table = rt->fib6_table;
spin_lock_bh(&table->tb6_lock);
err = fib6_add(&table->tb6_root, rt, info, extack);
spin_unlock_bh(&table->tb6_lock);

return err;
}

int ip6_ins_rt(struct net *net, struct fib6_info *rt)
{
struct nl_info info = { .nl_net = net, };

return __ip6_ins_rt(rt, &info, ((void *)0));
}

static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
const struct in6_addr *daddr,
const struct in6_addr *saddr)
{
struct fib6_info *f6i = res->f6i;
struct net_device *dev;
struct rt6_info *rt;





if (!fib6_info_hold_safe(f6i))
return ((void *)0);

dev = ip6_rt_get_dev_rcu(res);
rt = ip6_dst_alloc(dev_net(dev), dev, 0);
if (!rt) {
fib6_info_release(f6i);
return ((void *)0);
}

ip6_rt_copy_init(rt, res);
rt->rt6i_flags |= 0x01000000;
rt->rt6i_dst.addr = *daddr;
rt->rt6i_dst.plen = 128;

if (!rt6_is_gw_or_nonexthop(res)) {
if (f6i->fib6_dst.plen != 128 &&
ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
rt->rt6i_flags |= 0x00100000;






}

return rt;
}

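/* Allocate the per-cpu cached copy of a route. 0x40000000 below is
 * RTF_PCPU in the unpreprocessed source; for nexthop-object routes,
 * sernum snapshots the netns route generation id so stale copies can
 * be detected later.
 */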
static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
{
struct fib6_info *f6i = res->f6i;
unsigned short flags = fib6_info_dst_flags(f6i);
struct net_device *dev;
struct rt6_info *pcpu_rt;

if (!fib6_info_hold_safe(f6i))
return ((void *)0);

rcu_read_lock();
dev = ip6_rt_get_dev_rcu(res);
pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags | 0x0008);
rcu_read_unlock();
if (!pcpu_rt) {
fib6_info_release(f6i);
return ((void *)0);
}
ip6_rt_copy_init(pcpu_rt, res);
pcpu_rt->rt6i_flags |= 0x40000000;

if (f6i->nh)
pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));

return pcpu_rt;
}

static bool rt6_is_valid(const struct rt6_info *rt6)
{
return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));
}


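/* Return this CPU's cached route, or NULL on a miss. If the netns
 * generation id moved on since the copy was made, detach the stale
 * entry with xchg() so exactly one CPU ends up releasing it, and
 * treat the lookup as a miss.
 */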
static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
{
struct rt6_info *pcpu_rt;

pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);

if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
struct rt6_info *prev, **p;

p = this_cpu_ptr(res->nh->rt6i_pcpu);
prev = xchg(p, NULL);
if (prev) {
dst_dev_put(&prev->dst);
dst_release(&prev->dst);
}

pcpu_rt = ((void *)0);
}

return pcpu_rt;
}

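/* Build and install a per-cpu route copy. cmpxchg() only publishes
 * into an empty slot, and BUG_ON(prev) holds because each slot is
 * written only from its own CPU. If the backing fib6_info began
 * dying in the meantime, immediately drop the rt->from reference.
 */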
static struct rt6_info *rt6_make_pcpu_route(struct net *net,
const struct fib6_result *res)
{
struct rt6_info *pcpu_rt, *prev, **p;

pcpu_rt = ip6_rt_pcpu_alloc(res);
if (!pcpu_rt)
return ((void *)0);

p = this_cpu_ptr(res->nh->rt6i_pcpu);
prev = cmpxchg(p, NULL, pcpu_rt);
BUG_ON(prev);

if (res->f6i->fib6_destroying) {
struct fib6_info *from;

from = xchg((struct fib6_info **)&pcpu_rt->from, NULL);
fib6_info_release(from);
}

return pcpu_rt;
}



static DEFINE_SPINLOCK(rt6_exception_lock);




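/* Unlink one cached exception route. The caller must hold
 * rt6_exception_lock; the entry is freed via kfree_rcu() so
 * concurrent RCU readers can finish with it first.
 */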
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
struct rt6_exception *rt6_ex)
{
struct fib6_info *from;
struct net *net;

if (!bucket || !rt6_ex)
return;

net = dev_net(rt6_ex->rt6i->dst.dev);
net->ipv6.rt6_stats->fib_rt_cache--;




from = xchg((struct fib6_info **)&rt6_ex->rt6i->from, NULL);
fib6_info_release(from);
dst_dev_put(&rt6_ex->rt6i->dst);

hlist_del_rcu(&rt6_ex->hlist);
dst_release(&rt6_ex->rt6i->dst);
kfree_rcu(rt6_ex, rcu);
WARN_ON_ONCE(!bucket->depth);
bucket->depth--;
}




static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
{
struct rt6_exception *rt6_ex, *oldest = ((void *)0);

if (!bucket)
return;

hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
oldest = rt6_ex;
}
rt6_remove_exception(bucket, oldest);
}

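/* Hash a (dst, src) pair into one of the 1 << 10 exception buckets.
 * siphash with a boot-time random key keeps the bucket choice
 * unpredictable to remote senders, limiting deliberate collisions.
 * Without CONFIG_IPV6_SUBTREES the src half stays zeroed.
 */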
static u32 rt6_exception_hash(const struct in6_addr *dst,
const struct in6_addr *src)
{
static siphash_key_t __attribute__((__aligned__(16))) rt6_exception_key;
struct {
struct in6_addr dst;
struct in6_addr src;
} __attribute__((__aligned__(__alignof__(u64)))) combined = {
.dst = *dst,
};
u64 val;

net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key));





val = siphash(&combined, sizeof(combined), &rt6_exception_key);

return hash_64_generic(val, 10);
}






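/* Look up an exception by destination with rt6_exception_lock held.
 * Also advances *bucket to the hashed slot so the caller can insert
 * into or remove from the same chain afterwards.
 */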
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
const struct in6_addr *daddr,
const struct in6_addr *saddr)
{
struct rt6_exception *rt6_ex;
u32 hval;

if (!(*bucket) || !daddr)
return ((void *)0);

hval = rt6_exception_hash(daddr, saddr);
*bucket += hval;

hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
struct rt6_info *rt6 = rt6_ex->rt6i;
bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);





if (matched)
return rt6_ex;
}
return ((void *)0);
}






static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
const struct in6_addr *daddr,
const struct in6_addr *saddr)
{
struct rt6_exception *rt6_ex;
u32 hval;

WARN_ON_ONCE(!rcu_read_lock_held());

if (!(*bucket) || !daddr)
return ((void *)0);

hval = rt6_exception_hash(daddr, saddr);
*bucket += hval;

hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
struct rt6_info *rt6 = rt6_ex->rt6i;
bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);





if (matched)
return rt6_ex;
}
return ((void *)0);
}

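/* Effective MTU for a result: the route's RTAX_MTU metric if set,
 * otherwise the egress device's IPv6 MTU; clamped to the IPv6
 * maximum and reduced by any lightweight-tunnel encap headroom.
 */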
static unsigned int fib6_mtu(const struct fib6_result *res)
{
const struct fib6_nh *nh = res->nh;
unsigned int mtu;

if (res->f6i->fib6_metrics->metrics[RTAX_MTU-1]) {
mtu = res->f6i->fib6_metrics->metrics[RTAX_MTU-1];
} else {
struct net_device *dev = nh->nh_common.nhc_dev;
struct inet6_dev *idev;

rcu_read_lock();
idev = __in6_dev_get(dev);
mtu = idev->cnf.mtu6;
rcu_read_unlock();
}

mtu = min_t(unsigned int, mtu, 0xFFFF + sizeof(struct ipv6hdr));

return mtu - lwtunnel_headroom(nh->nh_common.nhc_lwtstate, mtu);
}
# 1605 "net/ipv6/route.c"
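/* Return the nexthop's exception bucket array, using the lock or RCU
 * variant of the dereference depending on the caller's context. The
 * low bit of the stored pointer doubles as a "flushed" marker (see
 * fib6_nh_excptn_bucket_flushed() below), so it is masked off before
 * the pointer is returned.
 */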
static
struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
spinlock_t *lock)
{
struct rt6_exception_bucket *bucket;

if (lock)
bucket = rcu_dereference_protected(nh->rt6i_exception_bucket, lockdep_is_held(lock));

else
bucket = rcu_dereference(nh->rt6i_exception_bucket);


if (bucket) {
unsigned long p = (unsigned long)bucket;

p &= ~0x1UL;
bucket = (struct rt6_exception_bucket *)p;
}

return bucket;
}

static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
{
unsigned long p = (unsigned long)bucket;

return !!(p & 0x1UL);
}


static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
spinlock_t *lock)
{
struct rt6_exception_bucket *bucket;
unsigned long p;

bucket = rcu_dereference_protected(nh->rt6i_exception_bucket, lockdep_is_held(lock));


p = (unsigned long)bucket;
p |= 0x1UL;
bucket = (struct rt6_exception_bucket *)p;
rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
}

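/* Insert nrt as a cached exception for this nexthop, allocating the
 * bucket array on first use. Bucket depth is capped at
 * 5 + prandom_u32_max(5); the random slack makes the eviction order
 * harder to probe remotely. On success the table sernum is bumped so
 * readers revalidate cached routes.
 */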
static int rt6_insert_exception(struct rt6_info *nrt,
const struct fib6_result *res)
{
struct net *net = dev_net(nrt->dst.dev);
struct rt6_exception_bucket *bucket;
struct fib6_info *f6i = res->f6i;
struct in6_addr *src_key = ((void *)0);
struct rt6_exception *rt6_ex;
struct fib6_nh *nh = res->nh;
int max_depth;
int err = 0;

spin_lock_bh(&rt6_exception_lock);

bucket = rcu_dereference_protected(nh->rt6i_exception_bucket, lockdep_is_held(&rt6_exception_lock));

if (!bucket) {
bucket = kcalloc(1 << 10, sizeof(*bucket), GFP_ATOMIC);
if (!bucket) {
err = -ENOMEM;
goto out;
}
rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
} else if (fib6_nh_excptn_bucket_flushed(bucket)) {
err = -22;
goto out;
}
# 1694 "net/ipv6/route.c"
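/* rt6_mtu_change() might lower the mtu on the parent fib6 route;
 * only insert this exception if its mtu is less than the parent's.
 */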
if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
err = -22;
goto out;
}

rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
src_key);
if (rt6_ex)
rt6_remove_exception(bucket, rt6_ex);

rt6_ex = kzalloc(sizeof(*rt6_ex), ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)));
if (!rt6_ex) {
err = -12;
goto out;
}
rt6_ex->rt6i = nrt;
rt6_ex->stamp = jiffies;
hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
bucket->depth++;
net->ipv6.rt6_stats->fib_rt_cache++;


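/* Cap the chain depth; the limit is randomised (5..9) so the
 * eviction threshold is not predictable, then evict the oldest
 * entries beyond it.
 */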
max_depth = 5 + prandom_u32_max(5);
while (bucket->depth > max_depth)
rt6_exception_remove_oldest(bucket);

out:
spin_unlock_bh(&rt6_exception_lock);


if (!err) {
spin_lock_bh(&f6i->fib6_table->tb6_lock);
fib6_update_sernum(net, f6i);
spin_unlock_bh(&f6i->fib6_table->tb6_lock);
fib6_force_start_gc(net);
}

return err;
}

static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
{
struct rt6_exception_bucket *bucket;
struct rt6_exception *rt6_ex;
struct hlist_node *tmp;
int i;

spin_lock_bh(&rt6_exception_lock);

bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
if (!bucket)
goto out;


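/* A NULL 'from' means a full flush: mark the bucket as flushed so
 * rt6_insert_exception() cannot repopulate it concurrently.
 */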
if (!from)
fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);

for (i = 0; i < (1 << 10); i++) {
hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
if (!from ||
rcu_access_pointer(rt6_ex->rt6i->from) == from)
rt6_remove_exception(bucket, rt6_ex);
}
WARN_ON_ONCE(!from && bucket->depth);
bucket++;
}
out:
spin_unlock_bh(&rt6_exception_lock);
}

static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg)
{
struct fib6_info *f6i = arg;

fib6_nh_flush_exceptions(nh, f6i);

return 0;
}

void rt6_flush_exceptions(struct fib6_info *f6i)
{
if (f6i->nh)
nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions,
f6i);
else
fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);
}




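/* Find a cached route for daddr/saddr in the nexthop's exception
 * table; the caller must hold rcu_read_lock().
 */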
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
const struct in6_addr *daddr,
const struct in6_addr *saddr)
{
const struct in6_addr *src_key = ((void *)0);
struct rt6_exception_bucket *bucket;
struct rt6_exception *rt6_ex;
struct rt6_info *ret = ((void *)0);
# 1810 "net/ipv6/route.c"
bucket = fib6_nh_get_excptn_bucket(res->nh, ((void *)0));
rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);

if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
ret = rt6_ex->rt6i;
# 1824 "net/ipv6/route.c"
return ret;
}


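/* Remove the passed-in cached rt from the exception table that contains it. */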
static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
const struct rt6_info *rt)
{
const struct in6_addr *src_key = ((void *)0);
struct rt6_exception_bucket *bucket;
struct rt6_exception *rt6_ex;
int err;

if (!rcu_access_pointer(nh->rt6i_exception_bucket))
return -2;

spin_lock_bh(&rt6_exception_lock);
bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
# 1852 "net/ipv6/route.c"
rt6_ex = __rt6_find_exception_spinlock(&bucket,
&rt->rt6i_dst.addr,
src_key);
if (rt6_ex) {
rt6_remove_exception(bucket, rt6_ex);
err = 0;
} else {
err = -2;
}

spin_unlock_bh(&rt6_exception_lock);
return err;
}

struct fib6_nh_excptn_arg {
struct rt6_info *rt;
int plen;
};

static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg)
{
struct fib6_nh_excptn_arg *arg = _arg;
int err;

err = fib6_nh_remove_exception(nh, arg->plen, arg->rt);
if (err == 0)
return 1;

return 0;
}

static int rt6_remove_exception_rt(struct rt6_info *rt)
{
struct fib6_info *from;

from = rcu_dereference(rt->from);
if (!from || !(rt->rt6i_flags & 0x01000000))
return -22;

if (from->nh) {
struct fib6_nh_excptn_arg arg = {
.rt = rt,
.plen = from->fib6_src.plen
};
int rc;


rc = nexthop_for_each_fib6_nh(from->nh,
rt6_nh_remove_exception_rt,
&arg);
return rc ? 0 : -2;
}

return fib6_nh_remove_exception(from->fib6_nh,
from->fib6_src.plen, rt);
}




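/* Find the rt6_ex entry holding this cached rt and refresh its stamp. */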
static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
const struct rt6_info *rt)
{
const struct in6_addr *src_key = ((void *)0);
struct rt6_exception_bucket *bucket;
struct rt6_exception *rt6_ex;

bucket = fib6_nh_get_excptn_bucket(nh, ((void *)0));
# 1930 "net/ipv6/route.c"
rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
if (rt6_ex)
rt6_ex->stamp = jiffies;
}

struct fib6_nh_match_arg {
const struct net_device *dev;
const struct in6_addr *gw;
struct fib6_nh *match;
};


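/* Check whether a fib6_nh uses the given device and gateway. */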
static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg)
{
struct fib6_nh_match_arg *arg = _arg;

if (arg->dev != nh->nh_common.nhc_dev ||
(arg->gw && !nh->nh_common.nhc_gw_family) ||
(!arg->gw && nh->nh_common.nhc_gw_family) ||
(arg->gw && !ipv6_addr_equal(arg->gw, &nh->nh_common.nhc_gw.ipv6)))
return 0;

arg->match = nh;


return 1;
}

static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
struct fib6_info *from;
struct fib6_nh *fib6_nh;

rcu_read_lock();

from = rcu_dereference(rt->from);
if (!from || !(rt->rt6i_flags & 0x01000000))
goto unlock;

if (from->nh) {
struct fib6_nh_match_arg arg = {
.dev = rt->dst.dev,
.gw = &rt->rt6i_gateway,
};

nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);

if (!arg.match)
goto unlock;
fib6_nh = arg.match;
} else {
fib6_nh = from->fib6_nh;
}
fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt);
unlock:
rcu_read_unlock();
}

static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
struct rt6_info *rt, int mtu)
{
# 2001 "net/ipv6/route.c"
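/* A lower mtu is always the new path mtu, so allow the decrease.
 * A higher mtu is allowed only when the current dst mtu equals the
 * device mtu (i.e. this hop was the bottleneck); if other nodes now
 * have lower mtus, pmtu discovery will rediscover them.
 */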
if (dst_mtu(&rt->dst) >= mtu)
return true;

if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
return true;

return false;
}

static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
const struct fib6_nh *nh, int mtu)
{
struct rt6_exception_bucket *bucket;
struct rt6_exception *rt6_ex;
int i;

bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
if (!bucket)
return;

for (i = 0; i < (1 << 10); i++) {
hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
struct rt6_info *entry = rt6_ex->rt6i;





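/* For cached entries with no private mtu metric (e.g. redirected
 * routes) the metrics of rt->from have already been updated.
 */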
if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
rt6_mtu_change_route_allowed(idev, entry, mtu))
dst_metric_set(&entry->dst, RTAX_MTU, mtu);
}
bucket++;
}
}



static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
const struct in6_addr *gateway)
{
struct rt6_exception_bucket *bucket;
struct rt6_exception *rt6_ex;
struct hlist_node *tmp;
int i;

if (!rcu_access_pointer(nh->rt6i_exception_bucket))
return;

spin_lock_bh(&rt6_exception_lock);
bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
if (bucket) {
for (i = 0; i < (1 << 10); i++) {
hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {

struct rt6_info *entry = rt6_ex->rt6i;

if ((entry->rt6i_flags & (0x0002 | 0x01000000)) ==
(0x0002 | 0x01000000) &&
ipv6_addr_equal(gateway,
&entry->rt6i_gateway)) {
rt6_remove_exception(bucket, rt6_ex);
}
}
bucket++;
}
}

spin_unlock_bh(&rt6_exception_lock);
}

static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
struct rt6_exception *rt6_ex,
struct fib6_gc_args *gc_args,
unsigned long now)
{
struct rt6_info *rt = rt6_ex->rt6i;







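/* Prune aged-out and non-gateway exceptions even if they are still
 * referenced, so that the references get dropped on the next
 * dst_check(). Entries with an expiry (e.g. pmtu exceptions) are
 * pruned only once expired, per RFC 8201 section 4.
 */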
if (!(rt->rt6i_flags & 0x00400000)) {
if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
do { ; } while (0);
rt6_remove_exception(bucket, rt6_ex);
return;
}
} else if (time_after(jiffies, rt->dst.expires)) {
do { ; } while (0);
rt6_remove_exception(bucket, rt6_ex);
return;
}

if (rt->rt6i_flags & 0x0002) {
struct neighbour *neigh;

neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);

if (!(neigh && (neigh->flags & (1 << 7)))) {
do { ; } while (0);

rt6_remove_exception(bucket, rt6_ex);
return;
}
}

gc_args->more++;
}

static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
struct fib6_gc_args *gc_args,
unsigned long now)
{
struct rt6_exception_bucket *bucket;
struct rt6_exception *rt6_ex;
struct hlist_node *tmp;
int i;

if (!rcu_access_pointer(nh->rt6i_exception_bucket))
return;

rcu_read_lock_bh();
spin_lock(&rt6_exception_lock);
bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
if (bucket) {
for (i = 0; i < (1 << 10); i++) {
hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {

rt6_age_examine_exception(bucket, rt6_ex,
gc_args, now);
}
bucket++;
}
}
spin_unlock(&rt6_exception_lock);
rcu_read_unlock_bh();
}

struct fib6_nh_age_excptn_arg {
struct fib6_gc_args *gc_args;
unsigned long now;
};

static int rt6_nh_age_exceptions(struct fib6_nh *nh, void *_arg)
{
struct fib6_nh_age_excptn_arg *arg = _arg;

fib6_nh_age_exceptions(nh, arg->gc_args, arg->now);
return 0;
}

void rt6_age_exceptions(struct fib6_info *f6i,
struct fib6_gc_args *gc_args,
unsigned long now)
{
if (f6i->nh) {
struct fib6_nh_age_excptn_arg arg = {
.gc_args = gc_args,
.now = now
};

nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions,
&arg);
} else {
fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now);
}
}


int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
struct flowi6 *fl6, struct fib6_result *res, int strict)
{
struct fib6_node *fn, *saved_fn;

fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
saved_fn = fn;

redo_rt6_select:
rt6_select(net, fn, oif, res, strict);
if (res->f6i == net->ipv6.fib6_null_entry) {
fn = fib6_backtrack(fn, &fl6->saddr);
if (fn)
goto redo_rt6_select;
else if (strict & 0x00000002) {

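/* Fall back to also considering unreachable routes. */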
strict &= ~0x00000002;
fn = saved_fn;
goto redo_rt6_select;
}
}

trace_fib6_table_lookup(net, res, table, fl6);

return 0;
}

struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
int oif, struct flowi6 *fl6,
const struct sk_buff *skb, int flags)
{
struct fib6_result res = {};
struct rt6_info *rt = ((void *)0);
int strict = 0;

WARN_ON_ONCE((flags & 0x00000080) && !rcu_read_lock_held());


strict |= flags & 0x00000001;
strict |= flags & 0x00000040;
if (net->ipv6.devconf_all->forwarding == 0)
strict |= 0x00000002;

rcu_read_lock();

fib6_table_lookup(net, table, oif, fl6, &res, strict);
if (res.f6i == net->ipv6.fib6_null_entry)
goto out;

fib6_select_path(net, &res, fl6, oif, false, skb, strict);


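/* Search the exception table first. */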
rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
if (rt) {
goto out;
} else if (__builtin_expect(!!((fl6->__fl_common.flowic_flags & 0x02) && !res.nh->nh_common.nhc_gw_family), 0)) {






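/* Create a cached clone that is not owned by the fib6 tree: the
 * daddr used during the neighbour lookup may differ from the
 * fl6->daddr used to look up the route here.
 */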
rt = ip6_rt_cache_alloc(&res, &fl6->daddr, ((void *)0));

if (rt) {





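/* The refcount taken by ip6_rt_cache_alloc() is returned to the
 * caller; rt6_uncached_list_add() does not consume it.
 */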
rt6_uncached_list_add(rt);
rcu_read_unlock();

return rt;
}
} else {

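/* Get a per-CPU copy. */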
local_bh_disable();
rt = rt6_get_pcpu_route(&res);

if (!rt)
rt = rt6_make_pcpu_route(net, &res);

local_bh_enable();
}
out:
if (!rt)
rt = net->ipv6.ip6_null_entry;
if (!(flags & 0x00000080))
ip6_hold_safe(net, &rt);
rcu_read_unlock();

return rt;
}
EXPORT_SYMBOL_GPL(ip6_pol_route);

static struct rt6_info *ip6_pol_route_input(struct net *net,
struct fib6_table *table,
struct flowi6 *fl6,
const struct sk_buff *skb,
int flags)
{
return ip6_pol_route(net, table, fl6->__fl_common.flowic_iif, fl6, skb, flags);
}

struct dst_entry *ip6_route_input_lookup(struct net *net,
struct net_device *dev,
struct flowi6 *fl6,
const struct sk_buff *skb,
int flags)
{
if (rt6_need_strict(&fl6->daddr) && dev->type != 779)
flags |= 0x00000001;

return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
}
EXPORT_SYMBOL_GPL(ip6_route_input_lookup);

static void ip6_multipath_l3_keys(const struct sk_buff *skb,
struct flow_keys *keys,
struct flow_keys *flkeys)
{
const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
const struct ipv6hdr *key_iph = outer_iph;
struct flow_keys *_flkeys = flkeys;
const struct ipv6hdr *inner_iph;
const struct icmp6hdr *icmph;
struct ipv6hdr _inner_iph;
struct icmp6hdr _icmph;

if (__builtin_expect(!!(outer_iph->nexthdr != 58), 1))
goto out;

icmph = skb_header_pointer(skb, skb_transport_offset(skb),
sizeof(_icmph), &_icmph);
if (!icmph)
goto out;

if (!icmpv6_is_err(icmph->icmp6_type))
goto out;

inner_iph = skb_header_pointer(skb,
skb_transport_offset(skb) + sizeof(*icmph),
sizeof(_inner_iph), &_inner_iph);
if (!inner_iph)
goto out;

key_iph = inner_iph;
_flkeys = ((void *)0);
out:
if (_flkeys) {
keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
keys->tags.flow_label = _flkeys->tags.flow_label;
keys->basic.ip_proto = _flkeys->basic.ip_proto;
} else {
keys->addrs.v6addrs.src = key_iph->saddr;
keys->addrs.v6addrs.dst = key_iph->daddr;
keys->tags.flow_label = ip6_flowlabel(key_iph);
keys->basic.ip_proto = key_iph->nexthdr;
}
}

static u32 rt6_multipath_custom_hash_outer(const struct net *net,
const struct sk_buff *skb,
bool *p_has_inner)
{
u32 hash_fields = ip6_multipath_hash_fields(net);
struct flow_keys keys, hash_keys;

if (!(hash_fields & ((1UL << 0) | (1UL << 1) | (1UL << 2) | (1UL << 3) | (1UL << 4) | (1UL << 5))))
return 0;

memset(&hash_keys, 0, sizeof(hash_keys));
skb_flow_dissect_flow_keys(skb, &keys, (1UL << 2));

hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
if (hash_fields & (1UL << 0))
hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
if (hash_fields & (1UL << 1))
hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
if (hash_fields & (1UL << 2))
hash_keys.basic.ip_proto = keys.basic.ip_proto;
if (hash_fields & (1UL << 3))
hash_keys.tags.flow_label = keys.tags.flow_label;
if (hash_fields & (1UL << 4))
hash_keys.ports.src = keys.ports.src;
if (hash_fields & (1UL << 5))
hash_keys.ports.dst = keys.ports.dst;

*p_has_inner = !!(keys.control.flags & (1UL << 2));
return flow_hash_from_keys(&hash_keys);
}

static u32 rt6_multipath_custom_hash_inner(const struct net *net,
const struct sk_buff *skb,
bool has_inner)
{
u32 hash_fields = ip6_multipath_hash_fields(net);
struct flow_keys keys, hash_keys;





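/* The caller already dissected the outer flow; if no encapsulation
 * was found there, dissecting again is pointless.
 */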
if (!has_inner)
return 0;

if (!(hash_fields & ((1UL << 6) | (1UL << 7) | (1UL << 8) | (1UL << 9) | (1UL << 10) | (1UL << 11))))
return 0;

memset(&hash_keys, 0, sizeof(hash_keys));
skb_flow_dissect_flow_keys(skb, &keys, 0);

if (!(keys.control.flags & (1UL << 2)))
return 0;

if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
if (hash_fields & (1UL << 6))
hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
if (hash_fields & (1UL << 7))
hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
if (hash_fields & (1UL << 6))
hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
if (hash_fields & (1UL << 7))
hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
if (hash_fields & (1UL << 9))
hash_keys.tags.flow_label = keys.tags.flow_label;
}

if (hash_fields & (1UL << 8))
hash_keys.basic.ip_proto = keys.basic.ip_proto;
if (hash_fields & (1UL << 10))
hash_keys.ports.src = keys.ports.src;
if (hash_fields & (1UL << 11))
hash_keys.ports.dst = keys.ports.dst;

return flow_hash_from_keys(&hash_keys);
}

static u32 rt6_multipath_custom_hash_skb(const struct net *net,
const struct sk_buff *skb)
{
u32 mhash, mhash_inner;
bool has_inner = true;

mhash = rt6_multipath_custom_hash_outer(net, skb, &has_inner);
mhash_inner = rt6_multipath_custom_hash_inner(net, skb, has_inner);

return jhash_2words(mhash, mhash_inner, 0);
}

static u32 rt6_multipath_custom_hash_fl6(const struct net *net,
const struct flowi6 *fl6)
{
u32 hash_fields = ip6_multipath_hash_fields(net);
struct flow_keys hash_keys;

if (!(hash_fields & ((1UL << 0) | (1UL << 1) | (1UL << 2) | (1UL << 3) | (1UL << 4) | (1UL << 5))))
return 0;

memset(&hash_keys, 0, sizeof(hash_keys));
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
if (hash_fields & (1UL << 0))
hash_keys.addrs.v6addrs.src = fl6->saddr;
if (hash_fields & (1UL << 1))
hash_keys.addrs.v6addrs.dst = fl6->daddr;
if (hash_fields & (1UL << 2))
hash_keys.basic.ip_proto = fl6->__fl_common.flowic_proto;
if (hash_fields & (1UL << 3))
hash_keys.tags.flow_label = ( u32)flowi6_get_flowlabel(fl6);
if (hash_fields & (1UL << 4))
hash_keys.ports.src = fl6->uli.ports.sport;
if (hash_fields & (1UL << 5))
hash_keys.ports.dst = fl6->uli.ports.dport;

return flow_hash_from_keys(&hash_keys);
}


u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
const struct sk_buff *skb, struct flow_keys *flkeys)
{
struct flow_keys hash_keys;
u32 mhash = 0;

switch (ip6_multipath_hash_policy(net)) {
case 0:
memset(&hash_keys, 0, sizeof(hash_keys));
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
if (skb) {
ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
} else {
hash_keys.addrs.v6addrs.src = fl6->saddr;
hash_keys.addrs.v6addrs.dst = fl6->daddr;
hash_keys.tags.flow_label = ( u32)flowi6_get_flowlabel(fl6);
hash_keys.basic.ip_proto = fl6->__fl_common.flowic_proto;
}
mhash = flow_hash_from_keys(&hash_keys);
break;
case 1:
if (skb) {
unsigned int flag = (1UL << 2);
struct flow_keys keys;


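/* Short-circuit if a layer-4 hash is already present. */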
if (skb->l4_hash)
return skb_get_hash_raw(skb) >> 1;

memset(&hash_keys, 0, sizeof(hash_keys));

if (!flkeys) {
skb_flow_dissect_flow_keys(skb, &keys, flag);
flkeys = &keys;
}
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
hash_keys.ports.src = flkeys->ports.src;
hash_keys.ports.dst = flkeys->ports.dst;
hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
} else {
memset(&hash_keys, 0, sizeof(hash_keys));
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
hash_keys.addrs.v6addrs.src = fl6->saddr;
hash_keys.addrs.v6addrs.dst = fl6->daddr;
hash_keys.ports.src = fl6->uli.ports.sport;
hash_keys.ports.dst = fl6->uli.ports.dport;
hash_keys.basic.ip_proto = fl6->__fl_common.flowic_proto;
}
mhash = flow_hash_from_keys(&hash_keys);
break;
case 2:
memset(&hash_keys, 0, sizeof(hash_keys));
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
if (skb) {
struct flow_keys keys;

if (!flkeys) {
skb_flow_dissect_flow_keys(skb, &keys, 0);
flkeys = &keys;
}


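/* The inner header can be IPv4 or IPv6. */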
if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
} else if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
hash_keys.tags.flow_label = flkeys->tags.flow_label;
hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
} else {

hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
}
} else {

hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
hash_keys.addrs.v6addrs.src = fl6->saddr;
hash_keys.addrs.v6addrs.dst = fl6->daddr;
hash_keys.tags.flow_label = ( u32)flowi6_get_flowlabel(fl6);
hash_keys.basic.ip_proto = fl6->__fl_common.flowic_proto;
}
mhash = flow_hash_from_keys(&hash_keys);
break;
case 3:
if (skb)
mhash = rt6_multipath_custom_hash_skb(net, skb);
else
mhash = rt6_multipath_custom_hash_fl6(net, fl6);
break;
}

return mhash >> 1;
}


void ip6_route_input(struct sk_buff *skb)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
struct net *net = dev_net(skb->dev);
int flags = 0x00000004 | 0x00000080;
struct ip_tunnel_info *tun_info;
struct flowi6 fl6 = {
.__fl_common.flowic_iif = skb->dev->ifindex,
.daddr = iph->daddr,
.saddr = iph->saddr,
.flowlabel = ip6_flowinfo(iph),
.__fl_common.flowic_mark = skb->mark,
.__fl_common.flowic_proto = iph->nexthdr,
};
struct flow_keys *flkeys = ((void *)0), _flkeys;

tun_info = skb_tunnel_info(skb);
if (tun_info && !(tun_info->mode & 0x01))
fl6.__fl_common.flowic_tun_key.tun_id = tun_info->key.tun_id;

if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
flkeys = &_flkeys;

if (__builtin_expect(!!(fl6.__fl_common.flowic_proto == 58), 0))
fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
skb_dst_drop(skb);
skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev,
&fl6, skb, flags));
}

static struct rt6_info *ip6_pol_route_output(struct net *net,
struct fib6_table *table,
struct flowi6 *fl6,
const struct sk_buff *skb,
int flags)
{
return ip6_pol_route(net, table, fl6->__fl_common.flowic_oif, fl6, skb, flags);
}

struct dst_entry *ip6_route_output_flags_noref(struct net *net,
const struct sock *sk,
struct flowi6 *fl6, int flags)
{
bool any_src;

if (ipv6_addr_type(&fl6->daddr) &
(0x0002U | 0x0020U)) {
struct dst_entry *dst;


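/* This lookup does not take a refcount on the dst. */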
dst = l3mdev_link_scope_lookup(net, fl6);
if (dst)
return dst;
}

fl6->__fl_common.flowic_iif = 1;

flags |= 0x00000080;
any_src = ipv6_addr_any(&fl6->saddr);
if ((sk && sk->__sk_common.skc_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
(fl6->__fl_common.flowic_oif && any_src))
flags |= 0x00000001;

if (!any_src)
flags |= 0x00000004;
else if (sk)
flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

return fib6_rule_lookup(net, fl6, ((void *)0), flags, ip6_pol_route_output);
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags_noref);

struct dst_entry *ip6_route_output_flags(struct net *net,
const struct sock *sk,
struct flowi6 *fl6,
int flags)
{
struct dst_entry *dst;
struct rt6_info *rt6;

rcu_read_lock();
dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
rt6 = (struct rt6_info *)dst;

if (list_empty(&rt6->rt6i_uncached) && !dst_hold_safe(dst)) {
dst = &net->ipv6.ip6_null_entry->dst;
dst_hold(dst);
}
rcu_read_unlock();

return dst;
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags);

struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
struct net_device *loopback_dev = net->loopback_dev;
struct dst_entry *new = ((void *)0);

rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
2, 0);
if (rt) {
rt6_info_init(rt);
atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);

new = &rt->dst;
new->__use = 1;
new->input = dst_discard;
new->output = dst_discard_out;

dst_copy_metrics(new, &ort->dst);

rt->rt6i_idev = in6_dev_get(loopback_dev);
rt->rt6i_gateway = ort->rt6i_gateway;
rt->rt6i_flags = ort->rt6i_flags & ~0x40000000;

memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));



}

dst_release(dst_orig);
return new ? new : ERR_PTR(-12);
}





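/* Destination cache support functions. */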
static bool fib6_check(struct fib6_info *f6i, u32 cookie)
{
u32 rt_cookie = 0;

if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
return false;

if (fib6_check_expired(f6i))
return false;

return true;
}

static struct dst_entry *rt6_check(struct rt6_info *rt,
struct fib6_info *from,
u32 cookie)
{
u32 rt_cookie = 0;

if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
rt_cookie != cookie)
return ((void *)0);

if (rt6_check_expired(rt))
return ((void *)0);

return &rt->dst;
}

static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
struct fib6_info *from,
u32 cookie)
{
if (!__rt6_check_expired(rt) &&
rt->dst.obsolete == -1 &&
fib6_check(from, cookie))
return &rt->dst;
else
return ((void *)0);
}

static struct dst_entry *ip6_dst_check(struct dst_entry *dst,
u32 cookie)
{
struct dst_entry *dst_ret;
struct fib6_info *from;
struct rt6_info *rt;

rt = container_of(dst, struct rt6_info, dst);

if (rt->sernum)
return rt6_is_valid(rt) ? dst : ((void *)0);

rcu_read_lock();






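/* All IPv6 dsts are created with ->obsolete set to
 * DST_OBSOLETE_FORCE_CHK, which forces validation calls down into
 * this function.
 */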
from = rcu_dereference(rt->from);

if (from && (rt->rt6i_flags & 0x40000000 ||
__builtin_expect(!!(!list_empty(&rt->rt6i_uncached)), 0)))
dst_ret = rt6_dst_from_check(rt, from, cookie);
else
dst_ret = rt6_check(rt, from, cookie);

rcu_read_unlock();

return dst_ret;
}
;

static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
struct rt6_info *rt = (struct rt6_info *) dst;

if (rt) {
if (rt->rt6i_flags & 0x01000000) {
rcu_read_lock();
if (rt6_check_expired(rt)) {
rt6_remove_exception_rt(rt);
dst = ((void *)0);
}
rcu_read_unlock();
} else {
dst_release(dst);
dst = ((void *)0);
}
}
return dst;
}

static void ip6_link_failure(struct sk_buff *skb)
{
struct rt6_info *rt;

icmpv6_send(skb, 1, 3, 0);

rt = (struct rt6_info *) skb_dst(skb);
if (rt) {
rcu_read_lock();
if (rt->rt6i_flags & 0x01000000) {
rt6_remove_exception_rt(rt);
} else {
struct fib6_info *from;
struct fib6_node *fn;

from = rcu_dereference(rt->from);
if (from) {
fn = rcu_dereference(from->fib6_node);
if (fn && (rt->rt6i_flags & 0x00010000))
WRITE_ONCE(fn->fn_sernum, -1);
}
}
rcu_read_unlock();
}
}

static void rt6_update_expires(struct rt6_info *rt0, int timeout)
{
if (!(rt0->rt6i_flags & 0x00400000)) {
struct fib6_info *from;

rcu_read_lock();
from = rcu_dereference(rt0->from);
if (from)
rt0->dst.expires = from->expires;
rcu_read_unlock();
}

dst_set_expires(&rt0->dst, timeout);
rt0->rt6i_flags |= 0x00400000;
}

static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
{
struct net *net = dev_net(rt->dst.dev);

dst_metric_set(&rt->dst, RTAX_MTU, mtu);
rt->rt6i_flags |= 0x0020;
rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
}

static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
return !(rt->rt6i_flags & 0x01000000) &&
(rt->rt6i_flags & 0x40000000 || rcu_access_pointer(rt->from));
}

static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
const struct ipv6hdr *iph, u32 mtu,
bool confirm_neigh)
{
const struct in6_addr *daddr, *saddr;
struct rt6_info *rt6 = (struct rt6_info *)dst;






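/* Do NOT check dst_metric_locked(dst, RTAX_MTU) here: IPv6 pmtu
 * discovery is not optional, so an "mtu lock" cannot disable it.
 */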
if (iph) {
daddr = &iph->daddr;
saddr = &iph->saddr;
} else if (sk) {
daddr = &sk->__sk_common.skc_v6_daddr;
saddr = &inet6_sk(sk)->saddr;
} else {
daddr = ((void *)0);
saddr = ((void *)0);
}

if (confirm_neigh)
dst_confirm_neigh(dst, daddr);

if (mtu < 1280)
return;
if (mtu >= dst_mtu(dst))
return;

if (!rt6_cache_allowed_for_pmtu(rt6)) {
rt6_do_update_pmtu(rt6, mtu);

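/* Refresh rt6_ex->stamp for cached entries. */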
if (rt6->rt6i_flags & 0x01000000)
rt6_update_exception_stamp_rt(rt6);
} else if (daddr) {
struct fib6_result res = {};
struct rt6_info *nrt6;

rcu_read_lock();
res.f6i = rcu_dereference(rt6->from);
if (!res.f6i)
goto out_unlock;

res.fib6_flags = res.f6i->fib6_flags;
res.fib6_type = res.f6i->fib6_type;

if (res.f6i->nh) {
struct fib6_nh_match_arg arg = {
.dev = dst->dev,
.gw = &rt6->rt6i_gateway,
};

nexthop_for_each_fib6_nh(res.f6i->nh,
fib6_nh_find_match, &arg);




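/* The fib6_info uses a nexthop with no fib6_nh matching
 * dst->dev + gateway; this should be impossible.
 */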
if (!arg.match)
goto out_unlock;

res.nh = arg.match;
} else {
res.nh = res.f6i->fib6_nh;
}

nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
if (nrt6) {
rt6_do_update_pmtu(nrt6, mtu);
if (rt6_insert_exception(nrt6, &res))
dst_release_immediate(&nrt6->dst);
}
out_unlock:
rcu_read_unlock();
}
}

static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu,
bool confirm_neigh)
{
__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : ((void *)0), mtu,
confirm_neigh);
}

void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
int oif, u32 mark, kuid_t uid)
{
const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
struct dst_entry *dst;
struct flowi6 fl6 = {
.__fl_common.flowic_oif = oif,
.__fl_common.flowic_mark = mark ? mark : ((net)->ipv6.sysctl.fwmark_reflect ? (skb->mark) : 0),
.daddr = iph->daddr,
.saddr = iph->saddr,
.flowlabel = ip6_flowinfo(iph),
.__fl_common.flowic_uid = uid,
};

dst = ip6_route_output(net, ((void *)0), &fl6);
if (!dst->error)
__ip6_rt_update_pmtu(dst, ((void *)0), iph, ntohl(mtu), true);
dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);

void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
int oif = sk->__sk_common.skc_bound_dev_if;
struct dst_entry *dst;

if (!oif && skb->dev)
oif = l3mdev_master_ifindex(skb->dev);

ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);

dst = __sk_dst_get(sk);
if (!dst || !dst->obsolete ||
dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
return;

spin_lock(&((sk)->sk_lock.slock));
if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->__sk_common.skc_v6_daddr))
ip6_datagram_dst_update(sk, false);
spin_unlock(&((sk)->sk_lock.slock));
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);

void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
const struct flowi6 *fl6)
{




ip6_dst_store(sk, dst,
ipv6_addr_equal(&fl6->daddr, &sk->__sk_common.skc_v6_daddr) ?
&sk->__sk_common.skc_v6_daddr : ((void *)0),




((void *)0));
}

static bool ip6_redirect_nh_match(const struct fib6_result *res,
struct flowi6 *fl6,
const struct in6_addr *gw,
struct rt6_info **ret)
{
const struct fib6_nh *nh = res->nh;

if (nh->nh_common.nhc_flags & 1 || !nh->nh_common.nhc_gw_family ||
fl6->__fl_common.flowic_oif != nh->nh_common.nhc_dev->ifindex)
return false;






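/* After an ICMP redirect the cached route's gateway can differ
 * from its parent's, so keep searching the exception table when
 * the gateways differ.
 */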
if (!ipv6_addr_equal(gw, &nh->nh_common.nhc_gw.ipv6)) {
struct rt6_info *rt_cache;

rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
if (rt_cache &&
ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
*ret = rt_cache;
return true;
}
return false;
}
return true;
}

struct fib6_nh_rd_arg {
struct fib6_result *res;
struct flowi6 *fl6;
const struct in6_addr *gw;
struct rt6_info **ret;
};

static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg)
{
struct fib6_nh_rd_arg *arg = _arg;

arg->res->nh = nh;
return ip6_redirect_nh_match(arg->res, arg->fl6, arg->gw, arg->ret);
}


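/* Redirect handling. */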
struct ip6rd_flowi {
struct flowi6 fl6;
struct in6_addr gateway;
};

static struct rt6_info *__ip6_route_redirect(struct net *net,
struct fib6_table *table,
struct flowi6 *fl6,
const struct sk_buff *skb,
int flags)
{
struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
struct rt6_info *ret = ((void *)0);
struct fib6_result res = {};
struct fib6_nh_rd_arg arg = {
.res = &res,
.fl6 = fl6,
.gw = &rdfl->gateway,
.ret = &ret
};
struct fib6_info *rt;
struct fib6_node *fn;
# 3065 "net/ipv6/route.c"
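/* Get the "current" route for this destination and check whether
 * the redirect came from an appropriate router. RFC 4861 says
 * redirects should only be accepted from the nexthop to the
 * target; given how routes are chosen this may require checking
 * every candidate route.
 */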
rcu_read_lock();
fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
for (rt = rcu_dereference(fn->leaf); rt; rt = rcu_dereference(rt->fib6_next)) {
res.f6i = rt;
if (fib6_check_expired(rt))
continue;
if (rt->fib6_flags & 0x0200)
break;
if (__builtin_expect(!!(rt->nh), 0)) {
if (nexthop_is_blackhole(rt->nh))
continue;

if (nexthop_for_each_fib6_nh(rt->nh,
fib6_nh_redirect_match,
&arg))
goto out;
} else {
res.nh = rt->fib6_nh;
if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway,
&ret))
goto out;
}
}

if (!rt)
rt = net->ipv6.fib6_null_entry;
else if (rt->fib6_flags & 0x0200) {
ret = net->ipv6.ip6_null_entry;
goto out;
}

if (rt == net->ipv6.fib6_null_entry) {
fn = fib6_backtrack(fn, &fl6->saddr);
if (fn)
goto restart;
}

res.f6i = rt;
res.nh = rt->fib6_nh;
out:
if (ret) {
ip6_hold_safe(net, &ret);
} else {
res.fib6_flags = res.f6i->fib6_flags;
res.fib6_type = res.f6i->fib6_type;
ret = ip6_create_rt_rcu(&res);
}

rcu_read_unlock();

trace_fib6_table_lookup(net, &res, table, fl6);
return ret;
};

static struct dst_entry *ip6_route_redirect(struct net *net,
const struct flowi6 *fl6,
const struct sk_buff *skb,
const struct in6_addr *gateway)
{
int flags = 0x00000004;
struct ip6rd_flowi rdfl;

rdfl.fl6 = *fl6;
rdfl.gateway = *gateway;

return fib6_rule_lookup(net, &rdfl.fl6, skb,
flags, __ip6_route_redirect);
}

void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
kuid_t uid)
{
const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
struct dst_entry *dst;
struct flowi6 fl6 = {
.__fl_common.flowic_iif = 1,
.__fl_common.flowic_oif = oif,
.__fl_common.flowic_mark = mark,
.daddr = iph->daddr,
.saddr = iph->saddr,
.flowlabel = ip6_flowinfo(iph),
.__fl_common.flowic_uid = uid,
};

dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
rt6_do_redirect(dst, ((void *)0), skb);
dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_redirect);

void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
struct dst_entry *dst;
struct flowi6 fl6 = {
.__fl_common.flowic_iif = 1,
.__fl_common.flowic_oif = oif,
.daddr = msg->dest,
.saddr = iph->daddr,
.__fl_common.flowic_uid = sock_net_uid(net, ((void *)0)),
};

dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
rt6_do_redirect(dst, ((void *)0), skb);
dst_release(dst);
}

void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
ip6_redirect(skb, sock_net(sk), sk->__sk_common.skc_bound_dev_if, sk->sk_mark,
sk->sk_uid);
}
EXPORT_SYMBOL_GPL(ip6_sk_redirect);

static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
struct net_device *dev = dst->dev;
unsigned int mtu = dst_mtu(dst);
struct net *net = dev_net(dev);

mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);

if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
mtu = net->ipv6.sysctl.ip6_rt_min_advmss;







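/* The maximal non-jumbo IPv6 payload is 65535, so the corresponding
 * MSS is 65535 minus the TCP header size; 65535 itself means "any
 * MSS, rely only on pmtu discovery".
 */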
if (mtu > 65535 - sizeof(struct tcphdr))
mtu = 65535;
return mtu;
}

static unsigned int ip6_mtu(const struct dst_entry *dst)
{
return ip6_dst_mtu_maybe_forward(dst, false);
}
;
# 3217 "net/ipv6/route.c"
u32 ip6_mtu_from_fib6(const struct fib6_result *res,
const struct in6_addr *daddr,
const struct in6_addr *saddr)
{
const struct fib6_nh *nh = res->nh;
struct fib6_info *f6i = res->f6i;
struct inet6_dev *idev;
struct rt6_info *rt;
u32 mtu = 0;

if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
mtu = f6i->fib6_metrics->metrics[RTAX_MTU-1];
if (mtu)
goto out;
}

rt = rt6_find_cached_rt(res, daddr, saddr);
if (unlikely(rt)) {
mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
} else {
struct net_device *dev = nh->nh_common.nhc_dev;

mtu = IPV6_MIN_MTU;
idev = __in6_dev_get(dev);
if (idev && idev->cnf.mtu6 > mtu)
mtu = idev->cnf.mtu6;
}

mtu = min_t(unsigned int, mtu, 0xFFFF + sizeof(struct ipv6hdr));
out:
return mtu - lwtunnel_headroom(nh->nh_common.nhc_lwtstate, mtu);
}

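/* Allocate a one-off dst for an outgoing ICMPv6 packet. The route is
 * kept on the uncached list and run through xfrm_lookup() before use.
 */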
struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
struct flowi6 *fl6)
{
struct dst_entry *dst;
struct rt6_info *rt;
struct inet6_dev *idev = in6_dev_get(dev);
struct net *net = dev_net(dev);

if (unlikely(!idev))
return ERR_PTR(-ENODEV);

rt = ip6_dst_alloc(net, dev, 0);
if (unlikely(!rt)) {
in6_dev_put(idev);
dst = ERR_PTR(-ENOMEM);
goto out;
}

rt->dst.input = ip6_input;
rt->dst.output = ip6_output;
rt->rt6i_gateway = fl6->daddr;
rt->rt6i_dst.addr = fl6->daddr;
rt->rt6i_dst.plen = 128;
rt->rt6i_idev = idev;
dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);




rt6_uncached_list_add(rt);

dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), ((void *)0), 0);

out:
return dst;
}

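/* dst_ops garbage collector: do nothing while under the size and rate
 * limits, otherwise run fib6_run_gc() and decay ip6_rt_gc_expire.
 * Returns nonzero while the table is still above ip6_rt_max_size.
 */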
static int ip6_dst_gc(struct dst_ops *ops)
{
struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
unsigned int val;
int entries;

entries = dst_entries_get_fast(ops);
if (entries > rt_max_size)
entries = dst_entries_get_slow(ops);

if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
entries <= rt_max_size)
goto out;

fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true);
entries = dst_entries_get_slow(ops);
if (entries < ops->gc_thresh)
atomic_set(&net->ipv6.ip6_rt_gc_expire, rt_gc_timeout >> 1);
out:
val = atomic_read(&net->ipv6.ip6_rt_gc_expire);
atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity));
return entries > rt_max_size;
}

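/* Resolve a configured gateway address in a specific FIB table; used
 * below when validating userspace-supplied nexthops.
 */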
static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
const struct in6_addr *gw_addr, u32 tbid,
int flags, struct fib6_result *res)
{
struct flowi6 fl6 = {
.__fl_common.flowic_oif = cfg->fc_ifindex,
.daddr = *gw_addr,
.saddr = cfg->fc_prefsrc,
};
struct fib6_table *table;
int err;

table = fib6_get_table(net, tbid);
if (!table)
return -EINVAL;

if (!ipv6_addr_any(&cfg->fc_prefsrc))
flags |= RT6_LOOKUP_F_HAS_SADDR;

flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;

err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags);
if (!err && res->f6i != net->ipv6.fib6_null_entry)
fib6_select_path(net, res, &fl6, cfg->fc_ifindex,
cfg->fc_ifindex != 0, ((void *)0), flags);

return err;
}

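/* An onlink nexthop is invalid if its gateway resolves (other than via
 * the default route) to a reject route or to a different device.
 */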
static int ip6_route_check_nh_onlink(struct net *net,
struct fib6_config *cfg,
const struct net_device *dev,
struct netlink_ext_ack *extack)
{
u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
const struct in6_addr *gw_addr = &cfg->fc_gateway;
struct fib6_result res = {};
int err;

err = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0, &res);
if (!err && !(res.fib6_flags & RTF_REJECT) &&

!ipv6_addr_any(&res.f6i->fib6_dst.addr) &&
(res.fib6_type != RTN_UNICAST || dev != res.nh->nh_common.nhc_dev)) {
NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway or device mismatch");

err = -22;
}

return err;
}

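/* Resolve the egress device for a gateway that was not marked onlink;
 * on success this may fill in *_dev and *idev with references held.
 */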
static int ip6_route_check_nh(struct net *net,
struct fib6_config *cfg,
struct net_device **_dev,
struct inet6_dev **idev)
{
const struct in6_addr *gw_addr = &cfg->fc_gateway;
struct net_device *dev = _dev ? *_dev : ((void *)0);
int flags = RT6_LOOKUP_F_IFACE;
struct fib6_result res = {};
int err = -EHOSTUNREACH;

if (cfg->fc_table) {
err = ip6_nh_lookup_table(net, cfg, gw_addr,
cfg->fc_table, flags, &res);



if (err || res.fib6_flags & RTF_REJECT ||
res.nh->nh_common.nhc_gw_family ||
(dev && dev != res.nh->nh_common.nhc_dev))
err = -EHOSTUNREACH;
}

if (err < 0) {
struct flowi6 fl6 = {
.__fl_common.flowic_oif = cfg->fc_ifindex,
.daddr = *gw_addr,
};

err = fib6_lookup(net, cfg->fc_ifindex, &fl6, &res, flags);
if (err || res.fib6_flags & RTF_REJECT ||
res.nh->nh_common.nhc_gw_family)
err = -EHOSTUNREACH;

if (err)
return err;

fib6_select_path(net, &res, &fl6, cfg->fc_ifindex,
cfg->fc_ifindex != 0, ((void *)0), flags);
}

err = 0;
if (dev) {
if (dev != res.nh->nh_common.nhc_dev)
err = -EHOSTUNREACH;
} else {
*_dev = dev = res.nh->nh_common.nhc_dev;
dev_hold(dev);
*idev = in6_dev_get(dev);
}

return err;
}

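/* Validate a route's gateway: it must not be a local address and must
 * resolve to a usable, non-loopback egress device.
 */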
static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
struct net_device **_dev, struct inet6_dev **idev,
struct netlink_ext_ack *extack)
{
const struct in6_addr *gw_addr = &cfg->fc_gateway;
int gwa_type = ipv6_addr_type(gw_addr);
bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
const struct net_device *dev = *_dev;
bool need_addr_check = !dev;
int err = -EINVAL;






if (dev &&
ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
goto out;
}

if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
# 3454 "net/ipv6/route.c"
if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
NL_SET_ERR_MSG(extack, "Invalid gateway address");
goto out;
}

rcu_read_lock();

if (cfg->fc_flags & RTNH_F_ONLINK)
err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
else
err = ip6_route_check_nh(net, cfg, _dev, idev);

rcu_read_unlock();

if (err)
goto out;
}


dev = *_dev;

err = -EINVAL;
if (!dev) {
NL_SET_ERR_MSG(extack, "Egress device not specified");
goto out;
} else if (dev->flags & IFF_LOOPBACK) {
NL_SET_ERR_MSG(extack, "Egress device can not be loopback device for this route");

goto out;
}




if (need_addr_check &&
ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
goto out;
}

err = 0;
out:
return err;
}

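/* True if the configuration describes a reject/blackhole route. */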
static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
{
if ((flags & RTF_REJECT) ||
(dev && (dev->flags & IFF_LOOPBACK) &&
!(addr_type & IPV6_ADDR_LOOPBACK) &&
!(flags & (RTF_ANYCAST | RTF_LOCAL))))
return true;

return false;
}

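/* Initialise a fib6_nh from a route config: resolve and validate the
 * device and gateway, then allocate the per-cpu route cache.
 */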
int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack)
{
struct net_device *dev = ((void *)0);
struct inet6_dev *idev = ((void *)0);
int addr_type;
int err;

fib6_nh->nh_common.nhc_family = AF_INET6;



if (cfg->fc_is_fdb) {
fib6_nh->nh_common.nhc_gw.ipv6 = cfg->fc_gateway;
fib6_nh->nh_common.nhc_gw_family = AF_INET6;
return 0;
}

err = -ENODEV;
if (cfg->fc_ifindex) {
dev = dev_get_by_index(net, cfg->fc_ifindex);
if (!dev)
goto out;
idev = in6_dev_get(dev);
if (!idev)
goto out;
}

if (cfg->fc_flags & RTNH_F_ONLINK) {
if (!dev) {
NL_SET_ERR_MSG(extack, "Nexthop device required for onlink");

goto out;
}

if (!(dev->flags & IFF_UP)) {
NL_SET_ERR_MSG(extack, "Nexthop device is not up");
err = -ENETDOWN;
goto out;
}

fib6_nh->nh_common.nhc_flags |= RTNH_F_ONLINK;
}

fib6_nh->nh_common.nhc_weight = 1;




addr_type = ipv6_addr_type(&cfg->fc_dst);
if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {

if (dev != net->loopback_dev) {
if (dev) {
dev_put(dev);
in6_dev_put(idev);
}
dev = net->loopback_dev;
dev_hold(dev);
idev = in6_dev_get(dev);
if (!idev) {
err = -ENODEV;
goto out;
}
}
goto pcpu_alloc;
}

if (cfg->fc_flags & RTF_GATEWAY) {
err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
if (err)
goto out;

fib6_nh->nh_common.nhc_gw.ipv6 = cfg->fc_gateway;
fib6_nh->nh_common.nhc_gw_family = AF_INET6;
}

err = -ENODEV;
if (!dev)
goto out;

if (idev->cnf.disable_ipv6) {
NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
err = -EACCES;
goto out;
}

if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
NL_SET_ERR_MSG(extack, "Nexthop device is not up");
err = -ENETDOWN;
goto out;
}

if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
!netif_carrier_ok(dev))
fib6_nh->nh_common.nhc_flags |= RTNH_F_LINKDOWN;

err = fib_nh_common_init(net, &fib6_nh->nh_common, cfg->fc_encap,
cfg->fc_encap_type, cfg, gfp_flags, extack);
if (err)
goto out;

pcpu_alloc:
fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
if (!fib6_nh->rt6i_pcpu) {
err = -ENOMEM;
goto out;
}

fib6_nh->nh_common.nhc_dev = dev;
netdev_tracker_alloc(dev, &fib6_nh->nh_common.nhc_dev_tracker, gfp_flags);

fib6_nh->nh_common.nhc_oif = dev->ifindex;
err = 0;
out:
if (idev)
in6_dev_put(idev);

if (err) {
lwtstate_put(fib6_nh->nh_common.nhc_lwtstate);
fib6_nh->nh_common.nhc_lwtstate = ((void *)0);
dev_put(dev);
}

return err;
}

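/* Tear down a fib6_nh: flush its exception routes, drop the per-cpu
 * cached dsts and release the common nexthop state.
 */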
void fib6_nh_release(struct fib6_nh *fib6_nh)
{
struct rt6_exception_bucket *bucket;

rcu_read_lock();

fib6_nh_flush_exceptions(fib6_nh, ((void *)0));
bucket = fib6_nh_get_excptn_bucket(fib6_nh, ((void *)0));
if (bucket) {
rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, ((void *)0));
kfree(bucket);
}

rcu_read_unlock();

fib6_nh_release_dsts(fib6_nh);
free_percpu(fib6_nh->rt6i_pcpu);

fib_nh_common_release(&fib6_nh->nh_common);
}

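/* Drop every per-cpu cached rt6_info hanging off this nexthop. */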
void fib6_nh_release_dsts(struct fib6_nh *fib6_nh)
{
int cpu;

if (!fib6_nh->rt6i_pcpu)
return;

for_each_possible_cpu(cpu) {
struct rt6_info *pcpu_rt, **ppcpu_rt;

ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
pcpu_rt = xchg(ppcpu_rt, ((void *)0));
if (pcpu_rt) {
dst_dev_put(&pcpu_rt->dst);
dst_release(&pcpu_rt->dst);
}
}
}

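/* Build a fib6_info from a netlink/ioctl route configuration; returns
 * the new entry or an ERR_PTR().
 */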
static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
gfp_t gfp_flags,
struct netlink_ext_ack *extack)
{
struct net *net = cfg->fc_nlinfo.nl_net;
struct fib6_info *rt = ((void *)0);
struct nexthop *nh = ((void *)0);
struct fib6_table *table;
struct fib6_nh *fib6_nh;
int err = -EINVAL;
int addr_type;


if (cfg->fc_flags & RTF_PCPU) {
NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
goto out;
}


if (cfg->fc_flags & RTF_CACHE) {
NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
goto out;
}

if (cfg->fc_type > (__RTN_MAX - 1)) {
NL_SET_ERR_MSG(extack, "Invalid route type");
goto out;
}

if (cfg->fc_dst_len > 128) {
NL_SET_ERR_MSG(extack, "Invalid prefix length");
goto out;
}
if (cfg->fc_src_len > 128) {
NL_SET_ERR_MSG(extack, "Invalid source address length");
goto out;
}

if (cfg->fc_src_len) {
NL_SET_ERR_MSG(extack, "Specifying source address requires IPV6_SUBTREES to be enabled");

goto out;
}

if (cfg->fc_nh_id) {
nh = nexthop_find_by_id(net, cfg->fc_nh_id);
if (!nh) {
NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
goto out;
}
err = fib6_check_nexthop(nh, cfg, extack);
if (err)
goto out;
}

err = -ENOBUFS;
if (cfg->fc_nlinfo.nlh &&
!(cfg->fc_nlinfo.nlh->nlmsg_flags & 0x400)) {
table = fib6_get_table(net, cfg->fc_table);
if (!table) {
pr_warn("NLM_F_CREATE should be specified when creating new route\n");
table = fib6_new_table(net, cfg->fc_table);
}
} else {
table = fib6_new_table(net, cfg->fc_table);
}

if (!table)
goto out;

err = -ENOMEM;
rt = fib6_info_alloc(gfp_flags, !nh);
if (!rt)
goto out;

rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
extack);
if (IS_ERR(rt->fib6_metrics)) {
err = PTR_ERR(rt->fib6_metrics);

rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
goto out_free;
}

if (cfg->fc_flags & RTF_ADDRCONF)
rt->dst_nocount = true;

if (cfg->fc_flags & RTF_EXPIRES)
fib6_set_expires(rt, jiffies +
clock_t_to_jiffies(cfg->fc_expires));
else
fib6_clean_expires(rt);

if (cfg->fc_protocol == RTPROT_UNSPEC)
cfg->fc_protocol = RTPROT_BOOT;
rt->fib6_protocol = cfg->fc_protocol;

rt->fib6_table = table;
rt->fib6_metric = cfg->fc_metric;
rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;

ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
rt->fib6_dst.plen = cfg->fc_dst_len;





if (nh) {
if (rt->fib6_src.plen) {
NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
goto out_free;
}
if (!nexthop_get(nh)) {
NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
goto out_free;
}
rt->nh = nh;
fib6_nh = nexthop_fib6_nh(rt->nh);
} else {
err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
if (err)
goto out;

fib6_nh = rt->fib6_nh;




addr_type = ipv6_addr_type(&cfg->fc_dst);
if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->nh_common.nhc_dev,
addr_type))
rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
}

if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
struct net_device *dev = fib6_nh->nh_common.nhc_dev;

if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
NL_SET_ERR_MSG(extack, "Invalid source address");
err = -EINVAL;
goto out;
}
rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
rt->fib6_prefsrc.plen = 128;
} else
rt->fib6_prefsrc.plen = 0;

return rt;
out:
fib6_info_release(rt);
return ERR_PTR(err);
out_free:
ip_fib_metrics_put(rt->fib6_metrics);
kfree(rt);
return ERR_PTR(err);
}

int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack)
{
struct fib6_info *rt;
int err;

rt = ip6_route_info_create(cfg, gfp_flags, extack);
if (IS_ERR(rt))
return PTR_ERR(rt);

err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
fib6_info_release(rt);

return err;
}

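/* Remove one fib6_info from its table under tb6_lock; consumes the
 * caller's reference.
 */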
static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
{
struct net *net = info->nl_net;
struct fib6_table *table;
int err;

if (rt == net->ipv6.fib6_null_entry) {
err = -ENOENT;
goto out;
}

table = rt->fib6_table;
spin_lock_bh(&table->tb6_lock);
err = fib6_del(rt, info);
spin_unlock_bh(&table->tb6_lock);

out:
fib6_info_release(rt);
return err;
}

int ip6_del_rt(struct net *net, struct fib6_info *rt, bool skip_notify)
{
struct nl_info info = {
.nl_net = net,
.skip_notify = skip_notify
};

return __ip6_del_rt(rt, &info);
}

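/* Delete a multipath route together with all of its siblings, sending
 * a single RTM_DELROUTE notification for the whole group.
 */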
static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
{
struct nl_info *info = &cfg->fc_nlinfo;
struct net *net = info->nl_net;
struct sk_buff *skb = ((void *)0);
struct fib6_table *table;
int err = -ENOENT;

if (rt == net->ipv6.fib6_null_entry)
goto out_put;
table = rt->fib6_table;
spin_lock_bh(&table->tb6_lock);

if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
struct fib6_info *sibling, *next_sibling;
struct fib6_node *fn;


skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
if (skb) {
u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;

if (rt6_fill_node(net, skb, rt, ((void *)0),
((void *)0), ((void *)0), 0, RTM_DELROUTE,
info->portid, seq, 0) < 0) {
kfree_skb(skb);
skb = ((void *)0);
} else
info->skip_notify = 1;
}






info->skip_notify_kernel = 1;
fn = rcu_dereference_protected(rt->fib6_node, lockdep_is_held(&table->tb6_lock));

if (rcu_access_pointer(fn->leaf) == rt) {
struct fib6_info *last_sibling, *replace_rt;

last_sibling = list_last_entry(&rt->fib6_siblings, struct fib6_info, fib6_siblings);


replace_rt = rcu_dereference_protected(last_sibling->fib6_next, lockdep_is_held(&table->tb6_lock));


if (replace_rt)
call_fib6_entry_notifiers_replace(net,
replace_rt);
else
call_fib6_multipath_entry_notifiers(net,
FIB_EVENT_ENTRY_DEL,
rt, rt->fib6_nsiblings,
((void *)0));
}
list_for_each_entry_safe(sibling, next_sibling, &rt->fib6_siblings, fib6_siblings) {


err = fib6_del(sibling, info);
if (err)
goto out_unlock;
}
}

err = fib6_del(rt, info);
out_unlock:
spin_unlock_bh(&table->tb6_lock);
out_put:
fib6_info_release(rt);

if (skb) {
rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
info->nlh, gfp_any());
}
return err;
}

static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
{
int rc = -ESRCH;

if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
goto out;

if (cfg->fc_flags & RTF_GATEWAY &&
!ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
goto out;

rc = rt6_remove_exception_rt(rt);
out:
return rc;
}

static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
struct fib6_nh *nh)
{
struct fib6_result res = {
.f6i = rt,
.nh = nh,
};
struct rt6_info *rt_cache;

rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src);
if (rt_cache)
return __ip6_del_cached_rt(rt_cache, cfg);

return 0;
}

struct fib6_nh_del_cached_rt_arg {
struct fib6_config *cfg;
struct fib6_info *f6i;
};

static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg)
{
struct fib6_nh_del_cached_rt_arg *arg = _arg;
int rc;

rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh);
return rc != -ESRCH ? rc : 0;
}

static int ip6_del_cached_rt_nh(struct fib6_config *cfg, struct fib6_info *f6i)
{
struct fib6_nh_del_cached_rt_arg arg = {
.cfg = cfg,
.f6i = f6i
};

return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg);
}

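/* Find and delete the route(s) matching the given configuration;
 * RTF_CACHE deletions target the exception table rather than the FIB.
 */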
static int ip6_route_del(struct fib6_config *cfg,
struct netlink_ext_ack *extack)
{
struct fib6_table *table;
struct fib6_info *rt;
struct fib6_node *fn;
int err = -ESRCH;

table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
if (!table) {
NL_SET_ERR_MSG(extack, "FIB table does not exist");
return err;
}

rcu_read_lock();

fn = fib6_locate(&table->tb6_root,
&cfg->fc_dst, cfg->fc_dst_len,
&cfg->fc_src, cfg->fc_src_len,
!(cfg->fc_flags & RTF_CACHE));

if (fn) {
for (rt = rcu_dereference(fn->leaf); rt; rt = rcu_dereference(rt->fib6_next)) {
struct fib6_nh *nh;

if (rt->nh && cfg->fc_nh_id &&
rt->nh->id != cfg->fc_nh_id)
continue;

if (cfg->fc_flags & RTF_CACHE) {
int rc = 0;

if (rt->nh) {
rc = ip6_del_cached_rt_nh(cfg, rt);
} else if (cfg->fc_nh_id) {
continue;
} else {
nh = rt->fib6_nh;
rc = ip6_del_cached_rt(cfg, rt, nh);
}
if (rc != -ESRCH) {
rcu_read_unlock();
return rc;
}
continue;
}

if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
continue;
if (cfg->fc_protocol &&
cfg->fc_protocol != rt->fib6_protocol)
continue;

if (rt->nh) {
if (!fib6_info_hold_safe(rt))
continue;
rcu_read_unlock();

return __ip6_del_rt(rt, &cfg->fc_nlinfo);
}
if (cfg->fc_nh_id)
continue;

nh = rt->fib6_nh;
if (cfg->fc_ifindex &&
(!nh->nh_common.nhc_dev ||
nh->nh_common.nhc_dev->ifindex != cfg->fc_ifindex))
continue;
if (cfg->fc_flags & RTF_GATEWAY &&
!ipv6_addr_equal(&cfg->fc_gateway, &nh->nh_common.nhc_gw.ipv6))
continue;
if (!fib6_info_hold_safe(rt))
continue;
rcu_read_unlock();


if (cfg->fc_flags & RTF_GATEWAY)
return __ip6_del_rt(rt, &cfg->fc_nlinfo);

return __ip6_del_rt_siblings(rt, cfg);
}
}
rcu_read_unlock();

return err;
}

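/* Act on a received ICMPv6 redirect: after validating the message,
 * update the neighbour cache and insert an exception route pointing
 * at the new gateway.
 */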
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
struct netevent_redirect netevent;
struct rt6_info *rt, *nrt = ((void *)0);
struct fib6_result res = {};
struct ndisc_options ndopts;
struct inet6_dev *in6_dev;
struct neighbour *neigh;
struct rd_msg *msg;
int optlen, on_link;
u8 *lladdr;

optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
optlen -= sizeof(*msg);

if (optlen < 0) {
net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
return;
}

msg = (struct rd_msg *)icmp6_hdr(skb);

if (ipv6_addr_is_multicast(&msg->dest)) {
net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
return;
}

on_link = 0;
if (ipv6_addr_equal(&msg->dest, &msg->target)) {
on_link = 1;
} else if (ipv6_addr_type(&msg->target) !=
(0x0001U|0x0020U)) {
net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
return;
}

in6_dev = __in6_dev_get(skb->dev);
if (!in6_dev)
return;
if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
return;






if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
return;
}

lladdr = ((void *)0);
if (ndopts.nd_opt_array[ND_OPT_TARGET_LL_ADDR]) {
lladdr = ndisc_opt_addr_data(ndopts.nd_opt_array[ND_OPT_TARGET_LL_ADDR],
skb->dev);
if (!lladdr) {
net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
return;
}
}

rt = (struct rt6_info *) dst;
if (rt->rt6i_flags & RTF_REJECT) {
net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
return;
}





dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);

neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
if (!neigh)
return;





ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
NEIGH_UPDATE_F_WEAK_OVERRIDE|
NEIGH_UPDATE_F_OVERRIDE|
(on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
NEIGH_UPDATE_F_ISROUTER)),
NDISC_REDIRECT, &ndopts);

rcu_read_lock();
res.f6i = rcu_dereference(rt->from);
if (!res.f6i)
goto out;

if (res.f6i->nh) {
struct fib6_nh_match_arg arg = {
.dev = dst->dev,
.gw = &rt->rt6i_gateway,
};

nexthop_for_each_fib6_nh(res.f6i->nh,
fib6_nh_find_match, &arg);




if (!arg.match)
goto out;
res.nh = arg.match;
} else {
res.nh = res.f6i->fib6_nh;
}

res.fib6_flags = res.f6i->fib6_flags;
res.fib6_type = res.f6i->fib6_type;
nrt = ip6_rt_cache_alloc(&res, &msg->dest, ((void *)0));
if (!nrt)
goto out;

nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
if (on_link)
nrt->rt6i_flags &= ~RTF_GATEWAY;

nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;


if (rt6_insert_exception(nrt, &res)) {
dst_release_immediate(&nrt->dst);
goto out;
}

netevent.old = &rt->dst;
netevent.new = &nrt->dst;
netevent.daddr = &msg->dest;
netevent.neigh = neigh;
call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);

out:
rcu_read_unlock();
neigh_release(neigh);
}
# 4322 "net/ipv6/route.c"
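/* Find the RA-learned default route via the given gateway and device,
 * taking a reference on it.
 */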
struct fib6_info *rt6_get_dflt_router(struct net *net,
const struct in6_addr *addr,
struct net_device *dev)
{
u32 tb_id = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
struct fib6_info *rt;
struct fib6_table *table;

table = fib6_get_table(net, tb_id);
if (!table)
return ((void *)0);

rcu_read_lock();
for (rt = rcu_dereference(table->tb6_root.leaf); rt; rt = rcu_dereference(rt->fib6_next)) {
struct fib6_nh *nh;


if (rt->nh)
continue;

nh = rt->fib6_nh;
if (dev == nh->nh_common.nhc_dev &&
((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
ipv6_addr_equal(&nh->nh_common.nhc_gw.ipv6, addr))
break;
}
if (rt && !fib6_info_hold_safe(rt))
rt = ((void *)0);
rcu_read_unlock();
return rt;
}

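/* Install a default route learned from a router advertisement. */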
struct fib6_info *rt6_add_dflt_router(struct net *net,
const struct in6_addr *gwaddr,
struct net_device *dev,
unsigned int pref,
u32 defrtr_usr_metric)
{
struct fib6_config cfg = {
.fc_table = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN,
.fc_metric = defrtr_usr_metric,
.fc_ifindex = dev->ifindex,
.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
.fc_protocol = RTPROT_RA,
.fc_type = RTN_UNICAST,
.fc_nlinfo.portid = 0,
.fc_nlinfo.nlh = ((void *)0),
.fc_nlinfo.nl_net = net,
};

cfg.fc_gateway = *gwaddr;

if (!ip6_route_add(&cfg, GFP_ATOMIC, ((void *)0))) {
struct fib6_table *table;

table = fib6_get_table(dev_net(dev), cfg.fc_table);
if (table)
table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
}

return rt6_get_dflt_router(net, gwaddr, dev);
}

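/* Remove every RA-learned default route from one table; the scan is
 * restarted after each deletion because the RCU lock is dropped.
 */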
static void __rt6_purge_dflt_routers(struct net *net,
struct fib6_table *table)
{
struct fib6_info *rt;

restart:
rcu_read_lock();
for (rt = rcu_dereference(table->tb6_root.leaf); rt; rt = rcu_dereference(rt->fib6_next)) {
struct net_device *dev = fib6_info_nh_dev(rt);
struct inet6_dev *idev = dev ? __in6_dev_get(dev) : ((void *)0);

if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
(!idev || idev->cnf.accept_ra != 2) &&
fib6_info_hold_safe(rt)) {
rcu_read_unlock();
ip6_del_rt(net, rt, false);
goto restart;
}
}
rcu_read_unlock();

table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
}

void rt6_purge_dflt_routers(struct net *net)
{
struct fib6_table *table;
struct hlist_head *head;
unsigned int h;

rcu_read_lock();

for (h = 0; h < 1; h++) {
head = &net->ipv6.fib_table_hash[h];
hlist_for_each_entry_rcu(table, head, tb6_hlist) {
if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
__rt6_purge_dflt_routers(net, table);
}
}

rcu_read_unlock();
}

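/* Translate the legacy in6_rtmsg ioctl structure into a fib6_config. */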
static void rtmsg_to_fib6_config(struct net *net,
struct in6_rtmsg *rtmsg,
struct fib6_config *cfg)
{
*cfg = (struct fib6_config){
.fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
: RT_TABLE_MAIN,
.fc_ifindex = rtmsg->rtmsg_ifindex,
.fc_metric = rtmsg->rtmsg_metric ? : 1024,
.fc_expires = rtmsg->rtmsg_info,
.fc_dst_len = rtmsg->rtmsg_dst_len,
.fc_src_len = rtmsg->rtmsg_src_len,
.fc_flags = rtmsg->rtmsg_flags,
.fc_type = rtmsg->rtmsg_type,

.fc_nlinfo.nl_net = net,

.fc_dst = rtmsg->rtmsg_dst,
.fc_src = rtmsg->rtmsg_src,
.fc_gateway = rtmsg->rtmsg_gateway,
};
}

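/* SIOCADDRT/SIOCDELRT handler; requires CAP_NET_ADMIN in the netns. */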
int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg)
{
struct fib6_config cfg;
int err;

if (cmd != SIOCADDRT && cmd != SIOCDELRT)
return -EINVAL;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;

rtmsg_to_fib6_config(net, rtmsg, &cfg);

rtnl_lock();
switch (cmd) {
case SIOCADDRT:
err = ip6_route_add(&cfg, GFP_KERNEL, ((void *)0));
break;
case SIOCDELRT:
err = ip6_route_del(&cfg, ((void *)0));
break;
}
rtnl_unlock();
return err;
}





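/* Account a packet dropped for lack of a route in the per-device and
 * per-net MIB counters.
 */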
static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
{
struct dst_entry *dst = skb_dst(skb);
struct net *net = dev_net(dst->dev);
struct inet6_dev *idev;
int type;

if (netif_is_l3_master(skb->dev) ||
dst->dev == net->loopback_dev)
idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
else
idev = ip6_dst_idev(dst);

switch (ipstats_mib_noroutes) {
case IPSTATS_MIB_INNOROUTES:
type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
if (type == IPV6_ADDR_ANY) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
break;
}
fallthrough;
case IPSTATS_MIB_OUTNOROUTES:
({ struct inet6_dev *_idev = (idev); if (__builtin_expect(!!(_idev != ((void *)0)), 1)) do { do { const void *__vpp_verify = (typeof((&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)])) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)])) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)]))) *)(&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)]))); (typeof((typeof(*(&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)]))) *)(&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)]))) *)(&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)]))); (typeof((typeof(*(&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)]))) *)(&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)]))) *)(&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)]))); (typeof((typeof(*(&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)]))) *)(&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)]))) 
*)(&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)]))); (typeof((typeof(*(&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)]))) *)(&((_idev)->stats.ipv6->mibs[(ipstats_mib_noroutes)])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); do { do { const void *__vpp_verify = (typeof((&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)])) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)])) { case 1: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)]))) *)(&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)]))); (typeof((typeof(*(&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)]))) *)(&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 2: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)]))) *)(&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)]))); (typeof((typeof(*(&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)]))) *)(&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 4: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)]))) *)(&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)]))); (typeof((typeof(*(&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)]))) *)(&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do 
{ } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; case 8: do { unsigned long __flags; do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __flags = arch_local_irq_save(); } while (0); do { *({ do { const void *__vpp_verify = (typeof((&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*(&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)]))) *)(&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)]))); (typeof((typeof(*(&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)]))) *)(&((net)->mib.ipv6_statistics->mibs[(ipstats_mib_noroutes)])))) (__ptr + (((__per_cpu_offset[(((struct thread_info *)get_current())->cpu)])))); }); }) += 1; } while (0); do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { } while (0); arch_local_irq_restore(__flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);});
break;
}
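/* The two huge switch cases above appear to be the fully preprocessed
 * expansion of the kernel's IP6_INC_STATS() helper, which before
 * preprocessing would read roughly:
 *
 *	IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
 *
 * It increments the per-CPU SNMP counter on both the inet6_dev and the
 * netns-wide ipv6 mib. The arch_local_irq_save()/arch_local_irq_restore()
 * pairs come from the generic this_cpu_inc() fallback, since this RISC-V
 * configuration has no specialized this_cpu ops.
 */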


if (netif_is_l3_master(skb->dev))
skb_dst_drop(skb);

icmpv6_send(skb, 1, code, 0);
kfree_skb(skb);
return 0;
}

static int ip6_pkt_discard(struct sk_buff *skb)
{
return ip6_pkt_drop(skb, 0, IPSTATS_MIB_INNOROUTES);
}

static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
skb->dev = skb_dst(skb)->dev;
return ip6_pkt_drop(skb, 0, IPSTATS_MIB_OUTNOROUTES);
}

static int ip6_pkt_prohibit(struct sk_buff *skb)
{
return ip6_pkt_drop(skb, 1, IPSTATS_MIB_INNOROUTES);
}

static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
skb->dev = skb_dst(skb)->dev;
return ip6_pkt_drop(skb, 1, IPSTATS_MIB_OUTNOROUTES);
}
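/* In the four handlers above, the bare constants are ICMPv6 values:
 * the second argument of ip6_pkt_drop() is 0 (ICMPV6_NOROUTE) or
 * 1 (ICMPV6_ADM_PROHIBITED), and the 1 passed to icmpv6_send() inside
 * ip6_pkt_drop() is ICMPV6_DEST_UNREACH.
 */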





struct fib6_info *addrconf_f6i_alloc(struct net *net,
struct inet6_dev *idev,
const struct in6_addr *addr,
bool anycast, gfp_t gfp_flags)
{
struct fib6_config cfg = {
.fc_table = l3mdev_fib_table(idev->dev) ? : RT_TABLE_MAIN,
.fc_ifindex = idev->dev->ifindex,
.fc_flags = 0x0001 | 0x00200000,
.fc_dst = *addr,
.fc_dst_len = 128,
.fc_protocol = 2,
.fc_nlinfo.nl_net = net,
.fc_ignore_dev_down = true,
};
struct fib6_info *f6i;

if (anycast) {
cfg.fc_type = RTN_ANYCAST;
cfg.fc_flags |= 0x00100000;
} else {
cfg.fc_type = RTN_LOCAL;
cfg.fc_flags |= 0x80000000;
}

f6i = ip6_route_info_create(&cfg, gfp_flags, ((void *)0));
if (!IS_ERR(f6i))
f6i->dst_nocount = true;
return f6i;
}
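/* The magic numbers in addrconf_f6i_alloc() are expanded RTF_* flags and
 * routing protocol values: 0x0001 | 0x00200000 is RTF_UP | RTF_NONEXTHOP,
 * 0x00100000 is RTF_ANYCAST, 0x80000000 is RTF_LOCAL, and
 * .fc_protocol = 2 is RTPROT_KERNEL. fc_dst_len = 128 makes this a host
 * route for the configured address.
 */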


struct arg_dev_net_ip {
struct net_device *dev;
struct net *net;
struct in6_addr *addr;
};

static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
{
struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
struct net *net = ((struct arg_dev_net_ip *)arg)->net;
struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;

if (!rt->nh &&
((void *)rt->fib6_nh->nh_common.nhc_dev == dev || !dev) &&
rt != net->ipv6.fib6_null_entry &&
ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
spin_lock_bh(&rt6_exception_lock);

rt->fib6_prefsrc.plen = 0;
spin_unlock_bh(&rt6_exception_lock);
}
return 0;
}

void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
{
struct net *net = dev_net(ifp->idev->dev);
struct arg_dev_net_ip adni = {
.dev = ifp->idev->dev,
.net = net,
.addr = &ifp->addr,
};
fib6_clean_all(net, fib6_remove_prefsrc, &adni);
}




static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
{
struct in6_addr *gateway = (struct in6_addr *)arg;
struct fib6_nh *nh;


if (rt->nh)
return 0;

nh = rt->fib6_nh;
if (((rt->fib6_flags & (0x00040000 | 0x00010000)) == (0x00040000 | 0x00010000)) &&
nh->nh_common.nhc_gw_family && ipv6_addr_equal(gateway, &nh->nh_common.nhc_gw.ipv6))
return -1;





fib6_nh_exceptions_clean_tohost(nh, gateway);

return 0;
}
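/* The flag test in fib6_clean_tohost() is presumably the expansion of
 *
 *	if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) && ...)
 *
 * since RTF_RA_ROUTER is RTF_ADDRCONF | RTF_DEFAULT
 * (0x00040000 | 0x00010000): only router-advertisement default routes
 * through the stale gateway are removed (return -1); everything else
 * merely has its cached exceptions flushed.
 */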

void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
{
fib6_clean_all(net, fib6_clean_tohost, gateway);
}

struct arg_netdev_event {
const struct net_device *dev;
union {
unsigned char nh_flags;
unsigned long event;
};
};

static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
{
struct fib6_info *iter;
struct fib6_node *fn;

fn = ({ do { } while (0 && (!((lock_is_held(&(&rt->fib6_table->tb6_lock)->dep_map))))); ; ((typeof(*(rt->fib6_node)) *)((rt->fib6_node))); });

iter = ({ do { } while (0 && (!((lock_is_held(&(&rt->fib6_table->tb6_lock)->dep_map))))); ; ((typeof(*(fn->leaf)) *)((fn->leaf))); });

while (iter) {
if (iter->fib6_metric == rt->fib6_metric &&
rt6_qualify_for_ecmp(iter))
return iter;
iter = ({ do { } while (0 && (!((lock_is_held(&(&rt->fib6_table->tb6_lock)->dep_map))))); ; ((typeof(*(iter->fib6_next)) *)((iter->fib6_next))); });

}

return ((void *)0);
}
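/* The ({ ... lock_is_held(...) ... }) statement expressions above look
 * like expansions of rcu_dereference_protected() under the table lock,
 * in unexpanded form roughly:
 *
 *	fn = rcu_dereference_protected(rt->fib6_node,
 *			lockdep_is_held(&rt->fib6_table->tb6_lock));
 *
 * The lockdep check compiles down to the empty do { } while (0 && ...)
 * shell in this configuration.
 */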


static bool rt6_is_dead(const struct fib6_info *rt)
{
if (rt->fib6_nh->nh_common.nhc_flags & 1 ||
(rt->fib6_nh->nh_common.nhc_flags & 16 &&
ip6_ignore_linkdown(rt->fib6_nh->nh_common.nhc_dev)))
return true;

return false;
}
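/* The nhc_flags bits tested in rt6_is_dead() are RTNH_F_DEAD (1) and
 * RTNH_F_LINKDOWN (16): a nexthop counts as dead if it is marked dead
 * outright, or if its link is down and the device's
 * ignore_routes_with_linkdown setting says such routes must not be used.
 */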

static int rt6_multipath_total_weight(const struct fib6_info *rt)
{
struct fib6_info *iter;
int total = 0;

if (!rt6_is_dead(rt))
total += rt->fib6_nh->nh_common.nhc_weight;

for (iter = ({ void *__mptr = (void *)((&rt->fib6_siblings)->next); _Static_assert(__builtin_types_compatible_p(typeof(*((&rt->fib6_siblings)->next)), typeof(((typeof(*iter) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((&rt->fib6_siblings)->next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*iter) *)(__mptr - __builtin_offsetof(typeof(*iter), fib6_siblings))); }); !(&iter->fib6_siblings == (&rt->fib6_siblings)); iter = ({ void *__mptr = (void *)((iter)->fib6_siblings.next); _Static_assert(__builtin_types_compatible_p(typeof(*((iter)->fib6_siblings.next)), typeof(((typeof(*(iter)) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((iter)->fib6_siblings.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(iter)) *)(__mptr - __builtin_offsetof(typeof(*(iter)), fib6_siblings))); })) {
if (!rt6_is_dead(iter))
total += iter->fib6_nh->nh_common.nhc_weight;
}

return total;
}
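/* The container_of()-based for loop above (and the matching loops further
 * down) appears to be the expansion of the list_for_each_entry()
 * iterator; before preprocessing it would read roughly:
 *
 *	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
 *		if (!rt6_is_dead(iter))
 *			total += iter->fib6_nh->nh_common.nhc_weight;
 */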

static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
{
int upper_bound = -1;

if (!rt6_is_dead(rt)) {
*weight += rt->fib6_nh->nh_common.nhc_weight;
upper_bound = ( { typeof(total) __d = total; unsigned long long _tmp = ((u64) (*weight) << 31) + (__d) / 2; ({ uint32_t __base = (__d); uint32_t __rem; __rem = ((uint64_t)(_tmp)) % __base; (_tmp) = ((uint64_t)(_tmp)) / __base; __rem; }); _tmp; } ) - 1;

}
atomic_set(&rt->fib6_nh->nh_common.nhc_upper_bound, upper_bound);
}
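/* The statement expression assigned to upper_bound looks like the
 * expansion of DIV_ROUND_CLOSEST_ULL(); unexpanded, the line would be
 * roughly:
 *
 *	upper_bound = DIV_ROUND_CLOSEST_ULL((u64)(*weight) << 31, total) - 1;
 *
 * Each nexthop's bound is its cumulative weight scaled into a 31-bit
 * range, which multipath selection later compares a flow hash against.
 */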

static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
{
struct fib6_info *iter;
int weight = 0;

rt6_upper_bound_set(rt, &weight, total);

for (iter = ({ void *__mptr = (void *)((&rt->fib6_siblings)->next); _Static_assert(__builtin_types_compatible_p(typeof(*((&rt->fib6_siblings)->next)), typeof(((typeof(*iter) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((&rt->fib6_siblings)->next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*iter) *)(__mptr - __builtin_offsetof(typeof(*iter), fib6_siblings))); }); !(&iter->fib6_siblings == (&rt->fib6_siblings)); iter = ({ void *__mptr = (void *)((iter)->fib6_siblings.next); _Static_assert(__builtin_types_compatible_p(typeof(*((iter)->fib6_siblings.next)), typeof(((typeof(*(iter)) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((iter)->fib6_siblings.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(iter)) *)(__mptr - __builtin_offsetof(typeof(*(iter)), fib6_siblings))); }))
rt6_upper_bound_set(iter, &weight, total);
}

void rt6_multipath_rebalance(struct fib6_info *rt)
{
struct fib6_info *first;
int total;





if (!rt->fib6_nsiblings || rt->should_flush)
return;





first = rt6_multipath_first_sibling(rt);
if (({ int __ret_warn_on = !!(!first); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("net/ipv6/route.c"), "i" (4735), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }))
return;

total = rt6_multipath_total_weight(first);
rt6_multipath_upper_bound_set(first, total);
}
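/* The inline asm inside the if () above appears to be the RISC-V
 * expansion of WARN_ON(!first): an ebreak instruction plus a __bug_table
 * entry recording the file and line ("net/ipv6/route.c", 4735) for the
 * trap handler to report.
 */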

static int fib6_ifup(struct fib6_info *rt, void *p_arg)
{
const struct arg_netdev_event *arg = p_arg;
struct net *net = dev_net(arg->dev);

if (rt != net->ipv6.fib6_null_entry && !rt->nh &&
rt->fib6_nh->nh_common.nhc_dev == arg->dev) {
rt->fib6_nh->nh_common.nhc_flags &= ~arg->nh_flags;
fib6_update_sernum_upto_root(net, rt);
rt6_multipath_rebalance(rt);
}

return 0;
}

void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
{
struct arg_netdev_event arg = {
.dev = dev,
{
.nh_flags = nh_flags,
},
};

if (nh_flags & 1 && netif_carrier_ok(dev))
arg.nh_flags |= 16;

fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
}


static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
const struct net_device *dev)
{
struct fib6_info *iter;

if (rt->fib6_nh->nh_common.nhc_dev == dev)
return true;
for (iter = ({ void *__mptr = (void *)((&rt->fib6_siblings)->next); _Static_assert(__builtin_types_compatible_p(typeof(*((&rt->fib6_siblings)->next)), typeof(((typeof(*iter) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((&rt->fib6_siblings)->next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*iter) *)(__mptr - __builtin_offsetof(typeof(*iter), fib6_siblings))); }); !(&iter->fib6_siblings == (&rt->fib6_siblings)); iter = ({ void *__mptr = (void *)((iter)->fib6_siblings.next); _Static_assert(__builtin_types_compatible_p(typeof(*((iter)->fib6_siblings.next)), typeof(((typeof(*(iter)) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((iter)->fib6_siblings.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(iter)) *)(__mptr - __builtin_offsetof(typeof(*(iter)), fib6_siblings))); }))
if (iter->fib6_nh->nh_common.nhc_dev == dev)
return true;

return false;
}

static void rt6_multipath_flush(struct fib6_info *rt)
{
struct fib6_info *iter;

rt->should_flush = 1;
for (iter = ({ void *__mptr = (void *)((&rt->fib6_siblings)->next); _Static_assert(__builtin_types_compatible_p(typeof(*((&rt->fib6_siblings)->next)), typeof(((typeof(*iter) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((&rt->fib6_siblings)->next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*iter) *)(__mptr - __builtin_offsetof(typeof(*iter), fib6_siblings))); }); !(&iter->fib6_siblings == (&rt->fib6_siblings)); iter = ({ void *__mptr = (void *)((iter)->fib6_siblings.next); _Static_assert(__builtin_types_compatible_p(typeof(*((iter)->fib6_siblings.next)), typeof(((typeof(*(iter)) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((iter)->fib6_siblings.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(iter)) *)(__mptr - __builtin_offsetof(typeof(*(iter)), fib6_siblings))); }))
iter->should_flush = 1;
}

static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
const struct net_device *down_dev)
{
struct fib6_info *iter;
unsigned int dead = 0;

if (rt->fib6_nh->nh_common.nhc_dev == down_dev ||
rt->fib6_nh->nh_common.nhc_flags & 1)
dead++;
for (iter = ({ void *__mptr = (void *)((&rt->fib6_siblings)->next); _Static_assert(__builtin_types_compatible_p(typeof(*((&rt->fib6_siblings)->next)), typeof(((typeof(*iter) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((&rt->fib6_siblings)->next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*iter) *)(__mptr - __builtin_offsetof(typeof(*iter), fib6_siblings))); }); !(&iter->fib6_siblings == (&rt->fib6_siblings)); iter = ({ void *__mptr = (void *)((iter)->fib6_siblings.next); _Static_assert(__builtin_types_compatible_p(typeof(*((iter)->fib6_siblings.next)), typeof(((typeof(*(iter)) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((iter)->fib6_siblings.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(iter)) *)(__mptr - __builtin_offsetof(typeof(*(iter)), fib6_siblings))); }))
if (iter->fib6_nh->nh_common.nhc_dev == down_dev ||
iter->fib6_nh->nh_common.nhc_flags & 1)
dead++;

return dead;
}

static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
const struct net_device *dev,
unsigned char nh_flags)
{
struct fib6_info *iter;

if (rt->fib6_nh->nh_common.nhc_dev == dev)
rt->fib6_nh->nh_common.nhc_flags |= nh_flags;
for (iter = ({ void *__mptr = (void *)((&rt->fib6_siblings)->next); _Static_assert(__builtin_types_compatible_p(typeof(*((&rt->fib6_siblings)->next)), typeof(((typeof(*iter) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((&rt->fib6_siblings)->next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*iter) *)(__mptr - __builtin_offsetof(typeof(*iter), fib6_siblings))); }); !(&iter->fib6_siblings == (&rt->fib6_siblings)); iter = ({ void *__mptr = (void *)((iter)->fib6_siblings.next); _Static_assert(__builtin_types_compatible_p(typeof(*((iter)->fib6_siblings.next)), typeof(((typeof(*(iter)) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((iter)->fib6_siblings.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(iter)) *)(__mptr - __builtin_offsetof(typeof(*(iter)), fib6_siblings))); }))
if (iter->fib6_nh->nh_common.nhc_dev == dev)
iter->fib6_nh->nh_common.nhc_flags |= nh_flags;
}


static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
{
const struct arg_netdev_event *arg = p_arg;
const struct net_device *dev = arg->dev;
struct net *net = dev_net(dev);

if (rt == net->ipv6.fib6_null_entry || rt->nh)
return 0;

switch (arg->event) {
case NETDEV_UNREGISTER:
return rt->fib6_nh->nh_common.nhc_dev == dev ? -1 : 0;
case NETDEV_DOWN:
if (rt->should_flush)
return -1;
if (!rt->fib6_nsiblings)
return rt->fib6_nh->nh_common.nhc_dev == dev ? -1 : 0;
if (rt6_multipath_uses_dev(rt, dev)) {
unsigned int count;

count = rt6_multipath_dead_count(rt, dev);
if (rt->fib6_nsiblings + 1 == count) {
rt6_multipath_flush(rt);
return -1;
}
rt6_multipath_nh_flags_set(rt, dev, 1 | 16);
fib6_update_sernum(net, rt);
rt6_multipath_rebalance(rt);
}
return -2;
case NETDEV_CHANGE:
if (rt->fib6_nh->nh_common.nhc_dev != dev ||
rt->fib6_flags & (0x80000000 | 0x00100000))
break;
rt->fib6_nh->nh_common.nhc_flags |= 16;
rt6_multipath_rebalance(rt);
break;
}

return 0;
}
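/* fib6_ifdown()'s return values follow the fib6_clean_all() callback
 * convention: 0 keeps the route, -1 asks the walker to delete it, and -2
 * (judging by the unexpanded source) tells the walker a whole multipath
 * route was handled so its remaining siblings can be skipped. The bare
 * 1 and 16 are again RTNH_F_DEAD and RTNH_F_LINKDOWN, and
 * 0x80000000 | 0x00100000 is RTF_LOCAL | RTF_ANYCAST.
 */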

void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
{
struct arg_netdev_event arg = {
.dev = dev,
{
.event = event,
},
};
struct net *net = dev_net(dev);

if (net->ipv6.sysctl.skip_notify_on_dev_down)
fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
else
fib6_clean_all(net, fib6_ifdown, &arg);
}

void rt6_disable_ip(struct net_device *dev, unsigned long event)
{
rt6_sync_down_dev(dev, event);
rt6_uncached_list_flush_dev(dev);
neigh_ifdown(&nd_tbl, dev);
}

struct rt6_mtu_change_arg {
struct net_device *dev;
unsigned int mtu;
struct fib6_info *f6i;
};

static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg)
{
struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg;
struct fib6_info *f6i = arg->f6i;






if (nh->nh_common.nhc_dev == arg->dev) {
struct inet6_dev *idev = __in6_dev_get(arg->dev);
u32 mtu = f6i->fib6_metrics->metrics[RTAX_MTU-1];

if (mtu >= arg->mtu ||
(mtu < arg->mtu && mtu == idev->cnf.mtu6))
fib6_metric_set(f6i, RTAX_MTU, arg->mtu);

spin_lock_bh(&rt6_exception_lock);
rt6_exceptions_update_pmtu(idev, nh, arg->mtu);
spin_unlock_bh(&rt6_exception_lock);
}

return 0;
}

static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg)
{
struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
struct inet6_dev *idev;







idev = __in6_dev_get(arg->dev);
if (!idev)
return 0;

if (fib6_metric_locked(f6i, RTAX_MTU))
return 0;

arg->f6i = f6i;
if (f6i->nh) {

return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change,
arg);
}

return fib6_nh_mtu_change(f6i->fib6_nh, arg);
}

void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
{
struct rt6_mtu_change_arg arg = {
.dev = dev,
.mtu = mtu,
};

fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
}

static const struct nla_policy rtm_ipv6_policy[(__RTA_MAX - 1)+1] = {
[RTA_UNSPEC] = { .strict_start_type = RTA_DPORT + 1 },
[RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
[RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
[RTA_OIF] = { .type = NLA_U32 },
[RTA_IIF] = { .type = NLA_U32 },
[RTA_PRIORITY] = { .type = NLA_U32 },
[RTA_METRICS] = { .type = NLA_NESTED },
[RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
[RTA_PREF] = { .type = NLA_U8 },
[RTA_ENCAP_TYPE] = { .type = NLA_U16 },
[RTA_ENCAP] = { .type = NLA_NESTED },
[RTA_EXPIRES] = { .type = NLA_U32 },
[RTA_UID] = { .type = NLA_U32 },
[RTA_MARK] = { .type = NLA_U32 },
[RTA_TABLE] = { .type = NLA_U32 },
[RTA_IP_PROTO] = { .type = NLA_U8 },
[RTA_SPORT] = { .type = NLA_U16 },
[RTA_DPORT] = { .type = NLA_U16 },
[RTA_NH_ID] = { .type = NLA_U32 },
};

static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
struct fib6_config *cfg,
struct netlink_ext_ack *extack)
{
struct rtmsg *rtm;
struct nlattr *tb[(__RTA_MAX - 1)+1];
unsigned int pref;
int err;

err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, (__RTA_MAX - 1),
rtm_ipv6_policy, extack);
if (err < 0)
goto errout;

err = -22;
rtm = nlmsg_data(nlh);

if (rtm->rtm_tos) {
do { static const char __msg[] = "Invalid dsfield (tos): option not available for IPv6"; struct netlink_ext_ack *__extack = (extack); do_trace_netlink_extack(__msg); if (__extack) __extack->_msg = __msg; } while (0);

goto errout;
}

*cfg = (struct fib6_config){
.fc_table = rtm->rtm_table,
.fc_dst_len = rtm->rtm_dst_len,
.fc_src_len = rtm->rtm_src_len,
.fc_flags = 0x0001,
.fc_protocol = rtm->rtm_protocol,
.fc_type = rtm->rtm_type,

.fc_nlinfo.portid = (*(struct netlink_skb_parms*)&((skb)->cb)).portid,
.fc_nlinfo.nlh = nlh,
.fc_nlinfo.nl_net = sock_net(skb->sk),
};

if (rtm->rtm_type == RTN_UNREACHABLE ||
rtm->rtm_type == RTN_BLACKHOLE ||
rtm->rtm_type == RTN_PROHIBIT ||
rtm->rtm_type == RTN_THROW)
cfg->fc_flags |= 0x0200;

if (rtm->rtm_type == RTN_LOCAL)
cfg->fc_flags |= 0x80000000;

if (rtm->rtm_flags & 0x200)
cfg->fc_flags |= 0x01000000;

cfg->fc_flags |= (rtm->rtm_flags & 4);

if (tb[RTA_NH_ID]) {
if (tb[RTA_GATEWAY] || tb[RTA_OIF] ||
tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) {
do { static const char __msg[] = "Nexthop specification and nexthop id are mutually exclusive"; struct netlink_ext_ack *__extack = (extack); do_trace_netlink_extack(__msg); if (__extack) __extack->_msg = __msg; } while (0);

goto errout;
}
cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]);
}

if (tb[RTA_GATEWAY]) {
cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
cfg->fc_flags |= 0x0002;
}
if (tb[RTA_VIA]) {
do { static const char __msg[] = "IPv6 does not support RTA_VIA attribute"; struct netlink_ext_ack *__extack = (extack); do_trace_netlink_extack(__msg); if (__extack) __extack->_msg = __msg; } while (0);
goto errout;
}

if (tb[RTA_DST]) {
int plen = (rtm->rtm_dst_len + 7) >> 3;

if (nla_len(tb[RTA_DST]) < plen)
goto errout;

nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
}

if (tb[RTA_SRC]) {
int plen = (rtm->rtm_src_len + 7) >> 3;

if (nla_len(tb[RTA_SRC]) < plen)
goto errout;

nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
}

if (tb[RTA_PREFSRC])
cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);

if (tb[RTA_OIF])
cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);

if (tb[RTA_PRIORITY])
cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);

if (tb[RTA_METRICS]) {
cfg->fc_mx = nla_data(tb[RTA_METRICS]);
cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
}

if (tb[RTA_TABLE])
cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);

if (tb[RTA_MULTIPATH]) {
cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);

err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
cfg->fc_mp_len, extack);
if (err < 0)
goto errout;
}

if (tb[RTA_PREF]) {
pref = nla_get_u8(tb[RTA_PREF]);
if (pref != 0x3 &&
pref != 0x1)
pref = 0x0;
cfg->fc_flags |= ((pref) << 27);
}

if (tb[RTA_ENCAP])
cfg->fc_encap = tb[RTA_ENCAP];

if (tb[RTA_ENCAP_TYPE]) {
cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);

err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
if (err < 0)
goto errout;
}

if (tb[RTA_EXPIRES]) {
unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), 100);

if (addrconf_finite_timeout(timeout)) {
cfg->fc_expires = jiffies_to_clock_t(timeout * 100);
cfg->fc_flags |= 0x00400000;
}
}

err = 0;
errout:
return err;
}
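/* Decoding the constants in rtm_to_fib6_config(): err = -22 is -EINVAL;
 * 0x0001 is RTF_UP; 0x0200 is RTF_REJECT; 0x80000000 is RTF_LOCAL;
 * rtm_flags & 0x200 is RTM_F_CLONED, which sets RTF_CACHE (0x01000000);
 * the flag 4 copied from rtm_flags is RTNH_F_ONLINK; the pref values
 * 0x3, 0x1 and 0x0 are the ICMPV6_ROUTER_PREF_{LOW,HIGH,MEDIUM}
 * encodings and ((pref) << 27) is RTF_PREF(pref); 0x00400000 is
 * RTF_EXPIRES; and the literal 100 around RTA_EXPIRES is HZ on this
 * configuration.
 */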

struct rt6_nh {
struct fib6_info *fib6_info;
struct fib6_config r_cfg;
struct list_head next;
};

static int ip6_route_info_append(struct net *net,
struct list_head *rt6_nh_list,
struct fib6_info *rt,
struct fib6_config *r_cfg)
{
struct rt6_nh *nh;
int err = -17;

for (nh = ({ void *__mptr = (void *)((rt6_nh_list)->next); _Static_assert(__builtin_types_compatible_p(typeof(*((rt6_nh_list)->next)), typeof(((typeof(*nh) *)0)->next)) || __builtin_types_compatible_p(typeof(*((rt6_nh_list)->next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*nh) *)(__mptr - __builtin_offsetof(typeof(*nh), next))); }); !(&nh->next == (rt6_nh_list)); nh = ({ void *__mptr = (void *)((nh)->next.next); _Static_assert(__builtin_types_compatible_p(typeof(*((nh)->next.next)), typeof(((typeof(*(nh)) *)0)->next)) || __builtin_types_compatible_p(typeof(*((nh)->next.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(nh)) *)(__mptr - __builtin_offsetof(typeof(*(nh)), next))); })) {

if (rt6_duplicate_nexthop(nh->fib6_info, rt))
return err;
}

nh = kzalloc(sizeof(*nh), ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u)));
if (!nh)
return -12;
nh->fib6_info = rt;
memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
list_add_tail(&nh->next, rt6_nh_list);

return 0;
}
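/* -17 and -12 are -EEXIST and -ENOMEM, and the gfp_t expression passed
 * to kzalloc(), (0x400u|0x800u) | 0x40u | 0x80u, is simply GFP_KERNEL
 * (__GFP_RECLAIM | __GFP_IO | __GFP_FS) after expansion.
 */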

static void ip6_route_mpath_notify(struct fib6_info *rt,
struct fib6_info *rt_last,
struct nl_info *info,
__u16 nlflags)
{






if ((nlflags & 0x800) && rt_last && rt_last->fib6_nsiblings) {
rt = ({ void *__mptr = (void *)((&rt_last->fib6_siblings)->next); _Static_assert(__builtin_types_compatible_p(typeof(*((&rt_last->fib6_siblings)->next)), typeof(((struct fib6_info *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((&rt_last->fib6_siblings)->next)), typeof(void)), "pointer type mismatch in container_of()"); ((struct fib6_info *)(__mptr - __builtin_offsetof(struct fib6_info, fib6_siblings))); });


}

if (rt)
inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
}

static bool ip6_route_mpath_should_notify(const struct fib6_info *rt)
{
bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
bool should_notify = false;
struct fib6_info *leaf;
struct fib6_node *fn;

rcu_read_lock();
fn = ({ typeof(*(rt->fib6_node)) *__UNIQUE_ID_rcu745 = (typeof(*(rt->fib6_node)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_746(void) ; if (!((sizeof((rt->fib6_node)) == sizeof(char) || sizeof((rt->fib6_node)) == sizeof(short) || sizeof((rt->fib6_node)) == sizeof(int) || sizeof((rt->fib6_node)) == sizeof(long)) || sizeof((rt->fib6_node)) == sizeof(long long))) __compiletime_assert_746(); } while (0); (*(const volatile typeof( _Generic(((rt->fib6_node)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((rt->fib6_node)))) *)&((rt->fib6_node))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(rt->fib6_node)) *)(__UNIQUE_ID_rcu745)); });
if (!fn)
goto out;

leaf = ({ typeof(*(fn->leaf)) *__UNIQUE_ID_rcu747 = (typeof(*(fn->leaf)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_748(void) ; if (!((sizeof((fn->leaf)) == sizeof(char) || sizeof((fn->leaf)) == sizeof(short) || sizeof((fn->leaf)) == sizeof(int) || sizeof((fn->leaf)) == sizeof(long)) || sizeof((fn->leaf)) == sizeof(long long))) __compiletime_assert_748(); } while (0); (*(const volatile typeof( _Generic(((fn->leaf)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((fn->leaf)))) *)&((fn->leaf))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(fn->leaf)) *)(__UNIQUE_ID_rcu747)); });
if (!leaf)
goto out;

if (rt == leaf ||
(rt_can_ecmp && rt->fib6_metric == leaf->fib6_metric &&
rt6_qualify_for_ecmp(leaf)))
should_notify = true;
out:
rcu_read_unlock();

return should_notify;
}

static int fib6_gw_from_attr(struct in6_addr *gw, struct nlattr *nla,
struct netlink_ext_ack *extack)
{
if (nla_len(nla) < sizeof(*gw)) {
do { static const char __msg[] = "Invalid IPv6 address in RTA_GATEWAY"; struct netlink_ext_ack *__extack = (extack); do_trace_netlink_extack(__msg); if (__extack) __extack->_msg = __msg; } while (0);
return -22;
}

*gw = nla_get_in6_addr(nla);

return 0;
}

static int ip6_route_multipath_add(struct fib6_config *cfg,
struct netlink_ext_ack *extack)
{
struct fib6_info *rt_notif = ((void *)0), *rt_last = ((void *)0);
struct nl_info *info = &cfg->fc_nlinfo;
struct fib6_config r_cfg;
struct rtnexthop *rtnh;
struct fib6_info *rt;
struct rt6_nh *err_nh;
struct rt6_nh *nh, *nh_safe;
__u16 nlflags;
int remaining;
int attrlen;
int err = 1;
int nhn = 0;
int replace = (cfg->fc_nlinfo.nlh &&
(cfg->fc_nlinfo.nlh->nlmsg_flags & 0x100));
struct list_head rt6_nh_list = { &(rt6_nh_list), &(rt6_nh_list) };

nlflags = replace ? 0x100 : 0x400;
if (info->nlh && info->nlh->nlmsg_flags & 0x800)
nlflags |= 0x800;

remaining = cfg->fc_mp_len;
rtnh = (struct rtnexthop *)cfg->fc_mp;




while (rtnh_ok(rtnh, remaining)) {
memcpy(&r_cfg, cfg, sizeof(*cfg));
if (rtnh->rtnh_ifindex)
r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

attrlen = rtnh_attrlen(rtnh);
if (attrlen > 0) {
struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

nla = nla_find(attrs, attrlen, RTA_GATEWAY);
if (nla) {
err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
extack);
if (err)
goto cleanup;

r_cfg.fc_flags |= 0x0002;
}
r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);




nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
if (nla)
r_cfg.fc_encap_type = nla_get_u16(nla);
}

r_cfg.fc_flags |= (rtnh->rtnh_flags & 4);
rt = ip6_route_info_create(&r_cfg, ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u)), extack);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = ((void *)0);
goto cleanup;
}
if (!rt6_qualify_for_ecmp(rt)) {
err = -22;
do { static const char __msg[] = "Device only routes can not be added for IPv6 using the multipath API."; struct netlink_ext_ack *__extack = (extack); do_trace_netlink_extack(__msg); if (__extack) __extack->_msg = __msg; } while (0);

fib6_info_release(rt);
goto cleanup;
}

rt->fib6_nh->nh_common.nhc_weight = rtnh->rtnh_hops + 1;

err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
rt, &r_cfg);
if (err) {
fib6_info_release(rt);
goto cleanup;
}

rtnh = rtnh_next(rtnh, &remaining);
}

if (list_empty(&rt6_nh_list)) {
do { static const char __msg[] = "Invalid nexthop configuration - no valid nexthops"; struct netlink_ext_ack *__extack = (extack); do_trace_netlink_extack(__msg); if (__extack) __extack->_msg = __msg; } while (0);

return -22;
}





info->skip_notify = 1;




info->skip_notify_kernel = 1;

err_nh = ((void *)0);
for (nh = ({ void *__mptr = (void *)((&rt6_nh_list)->next); _Static_assert(__builtin_types_compatible_p(typeof(*((&rt6_nh_list)->next)), typeof(((typeof(*nh) *)0)->next)) || __builtin_types_compatible_p(typeof(*((&rt6_nh_list)->next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*nh) *)(__mptr - __builtin_offsetof(typeof(*nh), next))); }); !(&nh->next == (&rt6_nh_list)); nh = ({ void *__mptr = (void *)((nh)->next.next); _Static_assert(__builtin_types_compatible_p(typeof(*((nh)->next.next)), typeof(((typeof(*(nh)) *)0)->next)) || __builtin_types_compatible_p(typeof(*((nh)->next.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(nh)) *)(__mptr - __builtin_offsetof(typeof(*(nh)), next))); })) {
err = __ip6_ins_rt(nh->fib6_info, info, extack);
fib6_info_release(nh->fib6_info);

if (!err) {

rt_last = nh->fib6_info;


if (!rt_notif)
rt_notif = nh->fib6_info;
}


nh->fib6_info = ((void *)0);
if (err) {
if (replace && nhn)
do { static const char __msg[] = "ipv6" ": " "multipath route replace failed (check consistency of installed routes)"; struct netlink_ext_ack *__extack = ((extack)); do_trace_netlink_extack(__msg); if (__extack) __extack->_msg = __msg; } while (0);

err_nh = nh;
goto add_errout;
}
# 5354 "net/ipv6/route.c"
if (cfg->fc_nlinfo.nlh) {
cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(0x200 | 0x100);
cfg->fc_nlinfo.nlh->nlmsg_flags |= 0x400;
}
nhn++;
}






if (ip6_route_mpath_should_notify(rt_notif)) {
enum fib_event_type fib_event;

if (rt_notif->fib6_nsiblings != nhn - 1)
fib_event = FIB_EVENT_ENTRY_APPEND;
else
fib_event = FIB_EVENT_ENTRY_REPLACE;

err = call_fib6_multipath_entry_notifiers(info->nl_net,
fib_event, rt_notif,
nhn - 1, extack);
if (err) {

err_nh = ((void *)0);
goto add_errout;
}
}


ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
goto cleanup;

add_errout:




if (rt_notif)
ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);


for (nh = ({ void *__mptr = (void *)((&rt6_nh_list)->next); _Static_assert(__builtin_types_compatible_p(typeof(*((&rt6_nh_list)->next)), typeof(((typeof(*nh) *)0)->next)) || __builtin_types_compatible_p(typeof(*((&rt6_nh_list)->next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*nh) *)(__mptr - __builtin_offsetof(typeof(*nh), next))); }); !(&nh->next == (&rt6_nh_list)); nh = ({ void *__mptr = (void *)((nh)->next.next); _Static_assert(__builtin_types_compatible_p(typeof(*((nh)->next.next)), typeof(((typeof(*(nh)) *)0)->next)) || __builtin_types_compatible_p(typeof(*((nh)->next.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(nh)) *)(__mptr - __builtin_offsetof(typeof(*(nh)), next))); })) {
if (err_nh == nh)
break;
ip6_route_del(&nh->r_cfg, extack);
}

cleanup:
for (nh = ({ void *__mptr = (void *)((&rt6_nh_list)->next); _Static_assert(__builtin_types_compatible_p(typeof(*((&rt6_nh_list)->next)), typeof(((typeof(*nh) *)0)->next)) || __builtin_types_compatible_p(typeof(*((&rt6_nh_list)->next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*nh) *)(__mptr - __builtin_offsetof(typeof(*nh), next))); }), nh_safe = ({ void *__mptr = (void *)((nh)->next.next); _Static_assert(__builtin_types_compatible_p(typeof(*((nh)->next.next)), typeof(((typeof(*(nh)) *)0)->next)) || __builtin_types_compatible_p(typeof(*((nh)->next.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(nh)) *)(__mptr - __builtin_offsetof(typeof(*(nh)), next))); }); !(&nh->next == (&rt6_nh_list)); nh = nh_safe, nh_safe = ({ void *__mptr = (void *)((nh_safe)->next.next); _Static_assert(__builtin_types_compatible_p(typeof(*((nh_safe)->next.next)), typeof(((typeof(*(nh_safe)) *)0)->next)) || __builtin_types_compatible_p(typeof(*((nh_safe)->next.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(nh_safe)) *)(__mptr - __builtin_offsetof(typeof(*(nh_safe)), next))); })) {
if (nh->fib6_info)
fib6_info_release(nh->fib6_info);
list_del(&nh->next);
kfree(nh);
}

return err;
}
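/* In ip6_route_multipath_add(), the netlink flag constants decode as
 * NLM_F_REPLACE (0x100), NLM_F_EXCL (0x200), NLM_F_CREATE (0x400) and
 * NLM_F_APPEND (0x800), and the initializer
 * { &(rt6_nh_list), &(rt6_nh_list) } is the expansion of
 * LIST_HEAD(rt6_nh_list), an empty doubly linked list.
 */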

static int ip6_route_multipath_del(struct fib6_config *cfg,
struct netlink_ext_ack *extack)
{
struct fib6_config r_cfg;
struct rtnexthop *rtnh;
int last_err = 0;
int remaining;
int attrlen;
int err;

remaining = cfg->fc_mp_len;
rtnh = (struct rtnexthop *)cfg->fc_mp;


while (rtnh_ok(rtnh, remaining)) {
memcpy(&r_cfg, cfg, sizeof(*cfg));
if (rtnh->rtnh_ifindex)
r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

attrlen = rtnh_attrlen(rtnh);
if (attrlen > 0) {
struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

nla = nla_find(attrs, attrlen, RTA_GATEWAY);
if (nla) {
err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
extack);
if (err) {
last_err = err;
goto next_rtnh;
}

r_cfg.fc_flags |= 0x0002;
}
}
err = ip6_route_del(&r_cfg, extack);
if (err)
last_err = err;

next_rtnh:
rtnh = rtnh_next(rtnh, &remaining);
}

return last_err;
}

static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct fib6_config cfg;
int err;

err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
if (err < 0)
return err;

if (cfg.fc_nh_id &&
!nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id)) {
do { static const char __msg[] = "Nexthop id does not exist"; struct netlink_ext_ack *__extack = (extack); do_trace_netlink_extack(__msg); if (__extack) __extack->_msg = __msg; } while (0);
return -22;
}

if (cfg.fc_mp)
return ip6_route_multipath_del(&cfg, extack);
else {
cfg.fc_delete_all_nh = 1;
return ip6_route_del(&cfg, extack);
}
}

static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct fib6_config cfg;
int err;

err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
if (err < 0)
return err;

if (cfg.fc_metric == 0)
cfg.fc_metric = 1024;

if (cfg.fc_mp)
return ip6_route_multipath_add(&cfg, extack);
else
return ip6_route_add(&cfg, ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u)), extack);
}


static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
{
int *nexthop_len = arg;

*nexthop_len += nla_total_size(0)
+ (((sizeof(struct rtnexthop)) + 4 - 1) & ~(4 - 1))
+ nla_total_size(16);

if (nh->nh_common.nhc_lwtstate) {

*nexthop_len += lwtunnel_get_encap_size(nh->nh_common.nhc_lwtstate);

*nexthop_len += nla_total_size(2);
}

return 0;
}

static size_t rt6_nlmsg_size(struct fib6_info *f6i)
{
int nexthop_len;

if (f6i->nh) {
nexthop_len = nla_total_size(4);
nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
&nexthop_len);
} else {
struct fib6_nh *nh = f6i->fib6_nh;

nexthop_len = 0;
if (f6i->fib6_nsiblings) {
nexthop_len = nla_total_size(0)
+ (((sizeof(struct rtnexthop)) + 4 - 1) & ~(4 - 1))
+ nla_total_size(16)
+ lwtunnel_get_encap_size(nh->nh_common.nhc_lwtstate);

nexthop_len *= f6i->fib6_nsiblings;
}
nexthop_len += lwtunnel_get_encap_size(nh->nh_common.nhc_lwtstate);
}

return ( ((sizeof(struct rtmsg))+4U -1) & ~(4U -1) )
+ nla_total_size(16)
+ nla_total_size(16)
+ nla_total_size(16)
+ nla_total_size(16)
+ nla_total_size(4)
+ nla_total_size(4)
+ nla_total_size(4)
+ nla_total_size(4)
+ (__RTAX_MAX - 1) * nla_total_size(4)
+ nla_total_size(sizeof(struct rta_cacheinfo))
+ nla_total_size(16)
+ nla_total_size(1)
+ nexthop_len;
}
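/* rt6_nlmsg_size() is an attribute-space estimate. Judging by the
 * unexpanded source, the repeated terms account for RTA_SRC, RTA_DST,
 * RTA_GATEWAY and RTA_PREFSRC (nla_total_size(16) each), RTA_TABLE,
 * RTA_IIF, RTA_OIF and RTA_PRIORITY (nla_total_size(4) each), one
 * 4-byte slot per RTAX_* metric, the rta_cacheinfo blob, a 16-byte
 * congestion-control algorithm name, one byte for RTA_PREF, plus the
 * per-nexthop space computed above.
 */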

static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
unsigned char *flags)
{
if (nexthop_is_multipath(nh)) {
struct nlattr *mp;

mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
if (!mp)
goto nla_put_failure;

if (nexthop_mpath_fill_node(skb, nh, 10))
goto nla_put_failure;

nla_nest_end(skb, mp);
} else {
struct fib6_nh *fib6_nh;

fib6_nh = nexthop_fib6_nh(nh);
if (fib_nexthop_info(skb, &fib6_nh->nh_common, 10,
flags, false) < 0)
goto nla_put_failure;
}

return 0;

nla_put_failure:
return -90;
}

static int rt6_fill_node(struct net *net, struct sk_buff *skb,
struct fib6_info *rt, struct dst_entry *dst,
struct in6_addr *dest, struct in6_addr *src,
int iif, int type, u32 portid, u32 seq,
unsigned int flags)
{
struct rt6_info *rt6 = (struct rt6_info *)dst;
struct rt6key *rt6_dst, *rt6_src;
u32 *pmetrics, table, rt6_flags;
unsigned char nh_flags = 0;
struct nlmsghdr *nlh;
struct rtmsg *rtm;
long expires = 0;

nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
if (!nlh)
return -90;

if (rt6) {
rt6_dst = &rt6->rt6i_dst;
rt6_src = &rt6->rt6i_src;
rt6_flags = rt6->rt6i_flags;
} else {
rt6_dst = &rt->fib6_dst;
rt6_src = &rt->fib6_src;
rt6_flags = rt->fib6_flags;
}

rtm = nlmsg_data(nlh);
rtm->rtm_family = 10;
rtm->rtm_dst_len = rt6_dst->plen;
rtm->rtm_src_len = rt6_src->plen;
rtm->rtm_tos = 0;
if (rt->fib6_table)
table = rt->fib6_table->tb6_id;
else
table = RT_TABLE_UNSPEC;
rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
if (nla_put_u32(skb, RTA_TABLE, table))
goto nla_put_failure;

rtm->rtm_type = rt->fib6_type;
rtm->rtm_flags = 0;
rtm->rtm_scope = RT_SCOPE_UNIVERSE;
rtm->rtm_protocol = rt->fib6_protocol;

if (rt6_flags & 0x01000000)
rtm->rtm_flags |= 0x200;

if (dest) {
if (nla_put_in6_addr(skb, RTA_DST, dest))
goto nla_put_failure;
rtm->rtm_dst_len = 128;
} else if (rtm->rtm_dst_len)
if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
goto nla_put_failure;
# 5656 "net/ipv6/route.c"
if (iif) {
# 5667 "net/ipv6/route.c"
if (nla_put_u32(skb, RTA_IIF, iif))
goto nla_put_failure;
} else if (dest) {
struct in6_addr saddr_buf;
if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
goto nla_put_failure;
}

if (rt->fib6_prefsrc.plen) {
struct in6_addr saddr_buf;
saddr_buf = rt->fib6_prefsrc.addr;
if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
goto nla_put_failure;
}

pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
if (rtnetlink_put_metrics(skb, pmetrics) < 0)
goto nla_put_failure;

if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
goto nla_put_failure;




if (rt6) {
if (rt6_flags & 0x0002 &&
nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
goto nla_put_failure;

if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
goto nla_put_failure;

if (dst->lwtstate &&
lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
goto nla_put_failure;
} else if (rt->fib6_nsiblings) {
struct fib6_info *sibling, *next_sibling;
struct nlattr *mp;

mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
if (!mp)
goto nla_put_failure;

if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
rt->fib6_nh->nh_common.nhc_weight, 10,
0) < 0)
goto nla_put_failure;

for (sibling = ({ void *__mptr = (void *)((&rt->fib6_siblings)->next); _Static_assert(__builtin_types_compatible_p(typeof(*((&rt->fib6_siblings)->next)), typeof(((typeof(*sibling) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((&rt->fib6_siblings)->next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*sibling) *)(__mptr - __builtin_offsetof(typeof(*sibling), fib6_siblings))); }), next_sibling = ({ void *__mptr = (void *)((sibling)->fib6_siblings.next); _Static_assert(__builtin_types_compatible_p(typeof(*((sibling)->fib6_siblings.next)), typeof(((typeof(*(sibling)) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((sibling)->fib6_siblings.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(sibling)) *)(__mptr - __builtin_offsetof(typeof(*(sibling)), fib6_siblings))); }); !(&sibling->fib6_siblings == (&rt->fib6_siblings)); sibling = next_sibling, next_sibling = ({ void *__mptr = (void *)((next_sibling)->fib6_siblings.next); _Static_assert(__builtin_types_compatible_p(typeof(*((next_sibling)->fib6_siblings.next)), typeof(((typeof(*(next_sibling)) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((next_sibling)->fib6_siblings.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(next_sibling)) *)(__mptr - __builtin_offsetof(typeof(*(next_sibling)), fib6_siblings))); })) {

if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
sibling->fib6_nh->nh_common.nhc_weight,
10, 0) < 0)
goto nla_put_failure;
}

nla_nest_end(skb, mp);
} else if (rt->nh) {
if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
goto nla_put_failure;

if (nexthop_is_blackhole(rt->nh))
rtm->rtm_type = RTN_BLACKHOLE;

if (net->ipv4.sysctl_nexthop_compat_mode &&
rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
goto nla_put_failure;

rtm->rtm_flags |= nh_flags;
} else {
if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, 10,
&nh_flags, false) < 0)
goto nla_put_failure;

rtm->rtm_flags |= nh_flags;
}

if (rt6_flags & 0x00400000) {
expires = dst ? dst->expires : rt->expires;
expires -= jiffies;
}

if (!dst) {
if (({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_749(void) ; if (!((sizeof(rt->offload) == sizeof(char) || sizeof(rt->offload) == sizeof(short) || sizeof(rt->offload) == sizeof(int) || sizeof(rt->offload) == sizeof(long)) || sizeof(rt->offload) == sizeof(long long))) __compiletime_assert_749(); } while (0); (*(const volatile typeof( _Generic((rt->offload), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (rt->offload))) *)&(rt->offload)); }))
rtm->rtm_flags |= 0x4000;
if (({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_750(void) ; if (!((sizeof(rt->trap) == sizeof(char) || sizeof(rt->trap) == sizeof(short) || sizeof(rt->trap) == sizeof(int) || sizeof(rt->trap) == sizeof(long)) || sizeof(rt->trap) == sizeof(long long))) __compiletime_assert_750(); } while (0); (*(const volatile typeof( _Generic((rt->trap), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (rt->trap))) *)&(rt->trap)); }))
rtm->rtm_flags |= 0x8000;
if (({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_751(void) ; if (!((sizeof(rt->offload_failed) == sizeof(char) || sizeof(rt->offload_failed) == sizeof(short) || sizeof(rt->offload_failed) == sizeof(int) || sizeof(rt->offload_failed) == sizeof(long)) || sizeof(rt->offload_failed) == sizeof(long long))) __compiletime_assert_751(); } while (0); (*(const volatile typeof( _Generic((rt->offload_failed), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (rt->offload_failed))) *)&(rt->offload_failed)); }))
rtm->rtm_flags |= 0x20000000;
}

if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
goto nla_put_failure;

if (nla_put_u8(skb, RTA_PREF, (((rt6_flags) & 0x18000000) >> 27)))
goto nla_put_failure;


nlmsg_end(skb, nlh);
return 0;

nla_put_failure:
nlmsg_cancel(skb, nlh);
return -90;
}
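/* Notes on rt6_fill_node(): the __compiletime_assert_NNN() statement
 * expressions near the end are expansions of READ_ONCE(rt->offload),
 * READ_ONCE(rt->trap) and READ_ONCE(rt->offload_failed), and the
 * rtm_flags bits they set are RTM_F_OFFLOAD (0x4000), RTM_F_TRAP
 * (0x8000) and RTM_F_OFFLOAD_FAILED (0x20000000). Elsewhere in the
 * function, 0x01000000 is RTF_CACHE (reported as RTM_F_CLONED, 0x200),
 * 0x0002 is RTF_GATEWAY, 0x00400000 is RTF_EXPIRES, 0x18000000 with the
 * >> 27 is the RTF_PREF mask, 10 is AF_INET6, and -90 is -EMSGSIZE.
 */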

static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg)
{
const struct net_device *dev = arg;

if (nh->nh_common.nhc_dev == dev)
return 1;

return 0;
}

static bool fib6_info_uses_dev(const struct fib6_info *f6i,
const struct net_device *dev)
{
if (f6i->nh) {
struct net_device *_dev = (struct net_device *)dev;

return !!nexthop_for_each_fib6_nh(f6i->nh,
fib6_info_nh_uses_dev,
_dev);
}

if (f6i->fib6_nh->nh_common.nhc_dev == dev)
return true;

if (f6i->fib6_nsiblings) {
struct fib6_info *sibling, *next_sibling;

for (sibling = ({ void *__mptr = (void *)((&f6i->fib6_siblings)->next); _Static_assert(__builtin_types_compatible_p(typeof(*((&f6i->fib6_siblings)->next)), typeof(((typeof(*sibling) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((&f6i->fib6_siblings)->next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*sibling) *)(__mptr - __builtin_offsetof(typeof(*sibling), fib6_siblings))); }), next_sibling = ({ void *__mptr = (void *)((sibling)->fib6_siblings.next); _Static_assert(__builtin_types_compatible_p(typeof(*((sibling)->fib6_siblings.next)), typeof(((typeof(*(sibling)) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((sibling)->fib6_siblings.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(sibling)) *)(__mptr - __builtin_offsetof(typeof(*(sibling)), fib6_siblings))); }); !(&sibling->fib6_siblings == (&f6i->fib6_siblings)); sibling = next_sibling, next_sibling = ({ void *__mptr = (void *)((next_sibling)->fib6_siblings.next); _Static_assert(__builtin_types_compatible_p(typeof(*((next_sibling)->fib6_siblings.next)), typeof(((typeof(*(next_sibling)) *)0)->fib6_siblings)) || __builtin_types_compatible_p(typeof(*((next_sibling)->fib6_siblings.next)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(next_sibling)) *)(__mptr - __builtin_offsetof(typeof(*(next_sibling)), fib6_siblings))); })) {

if (sibling->fib6_nh->nh_common.nhc_dev == dev)
return true;
}
}

return false;
}

struct fib6_nh_exception_dump_walker {
struct rt6_rtnl_dump_arg *dump;
struct fib6_info *rt;
unsigned int flags;
unsigned int skip;
unsigned int count;
};

static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg)
{
struct fib6_nh_exception_dump_walker *w = arg;
struct rt6_rtnl_dump_arg *dump = w->dump;
struct rt6_exception_bucket *bucket;
struct rt6_exception *rt6_ex;
int i, err;

bucket = fib6_nh_get_excptn_bucket(nh, ((void *)0));
if (!bucket)
return 0;

for (i = 0; i < (1 << 10); i++) {
for (rt6_ex = ({ typeof((&bucket->chain)->first) ____ptr = ((&bucket->chain)->first); ____ptr ? ({ void *__mptr = (void *)(____ptr); _Static_assert(__builtin_types_compatible_p(typeof(*(____ptr)), typeof(((typeof(*(rt6_ex)) *)0)->hlist)) || __builtin_types_compatible_p(typeof(*(____ptr)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(rt6_ex)) *)(__mptr - __builtin_offsetof(typeof(*(rt6_ex)), hlist))); }) : ((void *)0); }); rt6_ex; rt6_ex = ({ typeof((rt6_ex)->hlist.next) ____ptr = ((rt6_ex)->hlist.next); ____ptr ? ({ void *__mptr = (void *)(____ptr); _Static_assert(__builtin_types_compatible_p(typeof(*(____ptr)), typeof(((typeof(*(rt6_ex)) *)0)->hlist)) || __builtin_types_compatible_p(typeof(*(____ptr)), typeof(void)), "pointer type mismatch in container_of()"); ((typeof(*(rt6_ex)) *)(__mptr - __builtin_offsetof(typeof(*(rt6_ex)), hlist))); }) : ((void *)0); })) {
if (w->skip) {
w->skip--;
continue;
}
# 5850 "net/ipv6/route.c"
if (rt6_check_expired(rt6_ex->rt6i)) {
w->count++;
continue;
}

err = rt6_fill_node(dump->net, dump->skb, w->rt,
&rt6_ex->rt6i->dst, ((void *)0), ((void *)0), 0,
RTM_NEWROUTE,
(*(struct netlink_skb_parms*)&((dump->cb->skb)->cb)).portid,
dump->cb->nlh->nlmsg_seq, w->flags);
if (err)
return err;

w->count++;
}
bucket++;
}

return 0;
}
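/* The (1 << 10) bound is FIB6_EXCEPTION_BUCKET_SIZE, and the inner for
 * loop is the expansion of hlist_for_each_entry() over each bucket's
 * chain: the walker skips w->skip entries, then emits one RTM_NEWROUTE
 * per non-expired exception via rt6_fill_node().
 */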


int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
{
struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
struct fib_dump_filter *filter = &arg->filter;
unsigned int flags = 0x02;
struct net *net = arg->net;
int count = 0;

if (rt == net->ipv6.fib6_null_entry)
return -1;

if ((filter->flags & 0x800) &&
!(rt->fib6_flags & 0x00080000)) {

return -1;
}
if (filter->filter_set &&
((filter->rt_type && rt->fib6_type != filter->rt_type) ||
(filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
(filter->protocol && rt->fib6_protocol != filter->protocol))) {
return -1;
}

if (filter->filter_set ||
!filter->dump_routes || !filter->dump_exceptions) {
flags |= 0x20;
}

if (filter->dump_routes) {
if (skip) {
skip--;
} else {
if (rt6_fill_node(net, arg->skb, rt, ((void *)0), ((void *)0), ((void *)0),
0, RTM_NEWROUTE,
(*(struct netlink_skb_parms*)&((arg->cb->skb)->cb)).portid,
arg->cb->nlh->nlmsg_seq, flags)) {
return 0;
}
count++;
}
}

if (filter->dump_exceptions) {
struct fib6_nh_exception_dump_walker w = { .dump = arg,
.rt = rt,
.flags = flags,
.skip = skip,
.count = 0 };
int err;

rcu_read_lock();
if (rt->nh) {
err = nexthop_for_each_fib6_nh(rt->nh,
rt6_nh_dump_exceptions,
&w);
} else {
err = rt6_nh_dump_exceptions(rt->fib6_nh, &w);
}
rcu_read_unlock();

if (err)
return count += w.count;
}

return -1;
}
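/* In rt6_dump_route(), flags 0x02 and 0x20 are NLM_F_MULTI and
 * NLM_F_DUMP_FILTERED, and the filter bits 0x800 and 0x00080000 are
 * presumably RTM_F_PREFIX and RTF_PREFIX_RT. Judging by the callers,
 * returning -1 means this node is fully dumped, while a non-negative
 * count reports a partial dump so the walk can be resumed.
 */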

static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct nlattr **tb,
struct netlink_ext_ack *extack)
{
struct rtmsg *rtm;
int i, err;

if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
do { static const char __msg[] = "ipv6" ": " "Invalid header for get route request"; struct netlink_ext_ack *__extack = ((extack)); do_trace_netlink_extack(__msg); if (__extack) __extack->_msg = __msg; } while (0);

return -22;
}

if (!netlink_strict_get_check(skb))
return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, (__RTA_MAX - 1),
rtm_ipv6_policy, extack);

rtm = nlmsg_data(nlh);
if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
(rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
rtm->rtm_type) {
do { static const char __msg[] = "ipv6" ": " "Invalid values in header for get route request"; struct netlink_ext_ack *__extack = ((extack)); do_trace_netlink_extack(__msg); if (__extack) __extack->_msg = __msg; } while (0);
return -22;
}
if (rtm->rtm_flags & ~0x2000) {
do { static const char __msg[] = "ipv6" ": " "Invalid flags for get route request"; struct netlink_ext_ack *__extack = ((extack)); do_trace_netlink_extack(__msg); if (__extack) __extack->_msg = __msg; } while (0);

return -22;
}

err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, (__RTA_MAX - 1),
rtm_ipv6_policy, extack);
if (err)
return err;

if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
(tb[RTA_DST] && !rtm->rtm_dst_len)) {
do { static const char __msg[] = "ipv6" ": " "rtm_src_len and rtm_dst_len must be 128 for IPv6"; struct netlink_ext_ack *__extack = ((extack)); do_trace_netlink_extack(__msg); if (__extack) __extack->_msg = __msg; } while (0);
return -22;
}

for (i = 0; i <= (__RTA_MAX - 1); i++) {
if (!tb[i])
continue;

switch (i) {
case RTA_SRC:
case RTA_DST:
case RTA_IIF:
case RTA_OIF:
case RTA_MARK:
case RTA_UID:
case RTA_SPORT:
case RTA_DPORT:
case RTA_IP_PROTO:
break;
default:
do { static const char __msg[] = "ipv6" ": " "Unsupported attribute in get route request"; struct netlink_ext_ack *__extack = ((extack)); do_trace_netlink_extack(__msg); if (__extack) __extack->_msg = __msg; } while (0);
return -22;
}
}

return 0;
}

static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(in_skb->sk);
struct nlattr *tb[(__RTA_MAX - 1)+1];
int err, iif = 0, oif = 0;
struct fib6_info *from;
struct dst_entry *dst;
struct rt6_info *rt;
struct sk_buff *skb;
struct rtmsg *rtm;
struct flowi6 fl6 = {};
bool fibmatch;

err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
if (err < 0)
goto errout;

err = -22;
rtm = nlmsg_data(nlh);
fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
fibmatch = !!(rtm->rtm_flags & 0x2000);

if (tb[RTA_SRC]) {
if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
goto errout;

fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
}

if (tb[RTA_DST]) {
if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
goto errout;

fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
}

if (tb[RTA_IIF])
iif = nla_get_u32(tb[RTA_IIF]);

if (tb[RTA_OIF])
oif = nla_get_u32(tb[RTA_OIF]);

if (tb[RTA_MARK])
fl6.__fl_common.flowic_mark = nla_get_u32(tb[RTA_MARK]);

if (tb[RTA_UID])
fl6.__fl_common.flowic_uid = make_kuid((({ ({ do { } while (0 && (!((1)))); ; ((typeof(*(get_current()->cred)) *)((get_current()->cred))); })->user_ns; })),
nla_get_u32(tb[RTA_UID]));
else
fl6.__fl_common.flowic_uid = iif ? (kuid_t){ -1 } : (({ ({ do { } while (0 && (!((1)))); ; ((typeof(*(get_current()->cred)) *)((get_current()->cred))); })->uid; }));

if (tb[RTA_SPORT])
fl6.uli.ports.sport = nla_get_be16(tb[RTA_SPORT]);

if (tb[RTA_DPORT])
fl6.uli.ports.dport = nla_get_be16(tb[RTA_DPORT]);

if (tb[RTA_IP_PROTO]) {
err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
&fl6.__fl_common.flowic_proto, 10,
extack);
if (err)
goto errout;
}

if (iif) {
struct net_device *dev;
int flags = 0;

rcu_read_lock();

dev = dev_get_by_index_rcu(net, iif);
if (!dev) {
rcu_read_unlock();
err = -19;
goto errout;
}

fl6.__fl_common.flowic_iif = iif;

if (!ipv6_addr_any(&fl6.saddr))
flags |= 0x00000004;

dst = ip6_route_input_lookup(net, dev, &fl6, ((void *)0), flags);

rcu_read_unlock();
} else {
fl6.__fl_common.flowic_oif = oif;

dst = ip6_route_output(net, ((void *)0), &fl6);
}


rt = ({ void *__mptr = (void *)(dst); _Static_assert(__builtin_types_compatible_p(typeof(*(dst)), typeof(((struct rt6_info *)0)->dst)) || __builtin_types_compatible_p(typeof(*(dst)), typeof(void)), "pointer type mismatch in container_of()"); ((struct rt6_info *)(__mptr - __builtin_offsetof(struct rt6_info, dst))); });
if (rt->dst.error) {
err = rt->dst.error;
ip6_rt_put(rt);
goto errout;
}

if (rt == net->ipv6.ip6_null_entry) {
err = rt->dst.error;
ip6_rt_put(rt);
goto errout;
}

skb = alloc_skb(((((1UL) << (12))) - ((((sizeof(struct skb_shared_info))) + ((typeof((sizeof(struct skb_shared_info))))(((1 << 6))) - 1)) & ~((typeof((sizeof(struct skb_shared_info))))(((1 << 6))) - 1))), ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u)));
if (!skb) {
ip6_rt_put(rt);
err = -105;
goto errout;
}

skb_dst_set(skb, &rt->dst);

rcu_read_lock();
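/* Expanded rcu_dereference(rt->from): a compile-time size check plus a
 * volatile READ_ONCE() load.
 */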
from = ({ typeof(*(rt->from)) *__UNIQUE_ID_rcu752 = (typeof(*(rt->from)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_753(void) ; if (!((sizeof((rt->from)) == sizeof(char) || sizeof((rt->from)) == sizeof(short) || sizeof((rt->from)) == sizeof(int) || sizeof((rt->from)) == sizeof(long)) || sizeof((rt->from)) == sizeof(long long))) __compiletime_assert_753(); } while (0); (*(const volatile typeof( _Generic(((rt->from)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((rt->from)))) *)&((rt->from))); }); do { } while (0 && (!((0) || rcu_read_lock_held()))); ; ((typeof(*(rt->from)) *)(__UNIQUE_ID_rcu752)); });
if (from) {
if (fibmatch)
err = rt6_fill_node(net, skb, from, ((void *)0), ((void *)0), ((void *)0),
iif, RTM_NEWROUTE,
(*(struct netlink_skb_parms*)&((in_skb)->cb)).portid,
nlh->nlmsg_seq, 0);
else
err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
&fl6.saddr, iif, RTM_NEWROUTE,
(*(struct netlink_skb_parms*)&((in_skb)->cb)).portid,
nlh->nlmsg_seq, 0);
} else {
err = -101;
}
rcu_read_unlock();

if (err < 0) {
kfree_skb(skb);
goto errout;
}

err = rtnl_unicast(skb, net, (*(struct netlink_skb_parms*)&((in_skb)->cb)).portid);
errout:
return err;
}
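/* inet6_rt_notify(): notify RTNLGRP_IPV6_ROUTE listeners of a route
 * change. The RISC-V "ebreak"/__bug_table inline asm is the expanded
 * WARN_ON(err == -90), i.e. -EMSGSIZE, which would mean
 * rt6_nlmsg_size() undersized the message.
 */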

void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
unsigned int nlm_flags)
{
struct sk_buff *skb;
struct net *net = info->nl_net;
u32 seq;
int err;

err = -105;
seq = info->nlh ? info->nlh->nlmsg_seq : 0;

skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
if (!skb)
goto errout;

err = rt6_fill_node(net, skb, rt, ((void *)0), ((void *)0), ((void *)0), 0,
event, info->portid, seq, nlm_flags);
if (err < 0) {

({ int __ret_warn_on = !!(err == -90); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("net/ipv6/route.c"), "i" (6169), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
info->nlh, gfp_any());
return;
errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}
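/* fib6_rt_update(): like inet6_rt_notify(), but always sends
 * RTM_NEWROUTE with NLM_F_REPLACE (the 0x100 literal).
 */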

void fib6_rt_update(struct net *net, struct fib6_info *rt,
struct nl_info *info)
{
u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
struct sk_buff *skb;
int err = -105;

skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
if (!skb)
goto errout;

err = rt6_fill_node(net, skb, rt, ((void *)0), ((void *)0), ((void *)0), 0,
RTM_NEWROUTE, info->portid, seq, 0x100);
if (err < 0) {

({ int __ret_warn_on = !!(err == -90); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("net/ipv6/route.c"), "i" (6196), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
info->nlh, gfp_any());
return;
errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}
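/* fib6_info_hw_flags_set(): update a route's offload/trap/
 * offload_failed flags and notify userspace if requested. The
 * __compiletime_assert blocks are expanded READ_ONCE()/WRITE_ONCE();
 * the asm/ksymtab blob after the closing brace is the expanded
 * EXPORT_SYMBOL(fib6_info_hw_flags_set).
 */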

void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
bool offload, bool trap, bool offload_failed)
{
struct sk_buff *skb;
int err;

if (({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_754(void) ; if (!((sizeof(f6i->offload) == sizeof(char) || sizeof(f6i->offload) == sizeof(short) || sizeof(f6i->offload) == sizeof(int) || sizeof(f6i->offload) == sizeof(long)) || sizeof(f6i->offload) == sizeof(long long))) __compiletime_assert_754(); } while (0); (*(const volatile typeof( _Generic((f6i->offload), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (f6i->offload))) *)&(f6i->offload)); }) == offload &&
({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_755(void) ; if (!((sizeof(f6i->trap) == sizeof(char) || sizeof(f6i->trap) == sizeof(short) || sizeof(f6i->trap) == sizeof(int) || sizeof(f6i->trap) == sizeof(long)) || sizeof(f6i->trap) == sizeof(long long))) __compiletime_assert_755(); } while (0); (*(const volatile typeof( _Generic((f6i->trap), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (f6i->trap))) *)&(f6i->trap)); }) == trap &&
({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_756(void) ; if (!((sizeof(f6i->offload_failed) == sizeof(char) || sizeof(f6i->offload_failed) == sizeof(short) || sizeof(f6i->offload_failed) == sizeof(int) || sizeof(f6i->offload_failed) == sizeof(long)) || sizeof(f6i->offload_failed) == sizeof(long long))) __compiletime_assert_756(); } while (0); (*(const volatile typeof( _Generic((f6i->offload_failed), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (f6i->offload_failed))) *)&(f6i->offload_failed)); }) == offload_failed)
return;

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_757(void) ; if (!((sizeof(f6i->offload) == sizeof(char) || sizeof(f6i->offload) == sizeof(short) || sizeof(f6i->offload) == sizeof(int) || sizeof(f6i->offload) == sizeof(long)) || sizeof(f6i->offload) == sizeof(long long))) __compiletime_assert_757(); } while (0); do { *(volatile typeof(f6i->offload) *)&(f6i->offload) = (offload); } while (0); } while (0);
do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_758(void) ; if (!((sizeof(f6i->trap) == sizeof(char) || sizeof(f6i->trap) == sizeof(short) || sizeof(f6i->trap) == sizeof(int) || sizeof(f6i->trap) == sizeof(long)) || sizeof(f6i->trap) == sizeof(long long))) __compiletime_assert_758(); } while (0); do { *(volatile typeof(f6i->trap) *)&(f6i->trap) = (trap); } while (0); } while (0);


if (net->ipv6.sysctl.fib_notify_on_flag_change == 2 &&
({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_759(void) ; if (!((sizeof(f6i->offload_failed) == sizeof(char) || sizeof(f6i->offload_failed) == sizeof(short) || sizeof(f6i->offload_failed) == sizeof(int) || sizeof(f6i->offload_failed) == sizeof(long)) || sizeof(f6i->offload_failed) == sizeof(long long))) __compiletime_assert_759(); } while (0); (*(const volatile typeof( _Generic((f6i->offload_failed), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (f6i->offload_failed))) *)&(f6i->offload_failed)); }) == offload_failed)
return;

do { do { __attribute__((__noreturn__)) extern void __compiletime_assert_760(void) ; if (!((sizeof(f6i->offload_failed) == sizeof(char) || sizeof(f6i->offload_failed) == sizeof(short) || sizeof(f6i->offload_failed) == sizeof(int) || sizeof(f6i->offload_failed) == sizeof(long)) || sizeof(f6i->offload_failed) == sizeof(long long))) __compiletime_assert_760(); } while (0); do { *(volatile typeof(f6i->offload_failed) *)&(f6i->offload_failed) = (offload_failed); } while (0); } while (0);

if (!({ typeof(*(f6i->fib6_node)) *__UNIQUE_ID_rcu761 = (typeof(*(f6i->fib6_node)) *)({ do { __attribute__((__noreturn__)) extern void __compiletime_assert_762(void) ; if (!((sizeof((f6i->fib6_node)) == sizeof(char) || sizeof((f6i->fib6_node)) == sizeof(short) || sizeof((f6i->fib6_node)) == sizeof(int) || sizeof((f6i->fib6_node)) == sizeof(long)) || sizeof((f6i->fib6_node)) == sizeof(long long))) __compiletime_assert_762(); } while (0); (*(const volatile typeof( _Generic(((f6i->fib6_node)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((f6i->fib6_node)))) *)&((f6i->fib6_node))); }); ; ((typeof(*(f6i->fib6_node)) *)(__UNIQUE_ID_rcu761)); }))



return;

if (!net->ipv6.sysctl.fib_notify_on_flag_change)
return;

skb = nlmsg_new(rt6_nlmsg_size(f6i), ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u)));
if (!skb) {
err = -105;
goto errout;
}

err = rt6_fill_node(net, skb, f6i, ((void *)0), ((void *)0), ((void *)0), 0, RTM_NEWROUTE, 0,
0, 0);
if (err < 0) {

({ int __ret_warn_on = !!(err == -90); if (__builtin_expect(!!(__ret_warn_on), 0)) do { __asm__ __volatile__ ( "1:\n\t" "ebreak\n" ".pushsection __bug_table,\"aw\"\n\t" "2:\n\t" ".word" " 1b - 2b" "\n\t" ".word" " %0 - 2b" "\n\t" ".half" " %1\n\t" ".half" " %2" "\n\t" ".org 2b + %3\n\t" ".popsection" : : "i" ("net/ipv6/route.c"), "i" (6248), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); __builtin_expect(!!(__ret_warn_on), 0); });
kfree_skb(skb);
goto errout;
}

rtnl_notify(skb, net, 0, RTNLGRP_IPV6_ROUTE, ((void *)0), ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u)));
return;

errout:
rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}
extern typeof(fib6_info_hw_flags_set) fib6_info_hw_flags_set; extern const char __kstrtab_fib6_info_hw_flags_set[]; extern const char __kstrtabns_fib6_info_hw_flags_set[]; ; asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1 \n" "__kstrtab_" "fib6_info_hw_flags_set" ": \n" " .asciz \"" "fib6_info_hw_flags_set" "\" \n" "__kstrtabns_" "fib6_info_hw_flags_set" ": \n" " .asciz \"" "" "\" \n" " .previous \n"); static const struct kernel_symbol __ksymtab_fib6_info_hw_flags_set __attribute__((section("___ksymtab" "" "+" "fib6_info_hw_flags_set"), used)) __attribute__((__aligned__(sizeof(void *)))) = { (unsigned long)&fib6_info_hw_flags_set, __kstrtab_fib6_info_hw_flags_set, __kstrtabns_fib6_info_hw_flags_set };
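/* ip6_route_dev_notify(): netdevice notifier that points the
 * per-netns null route entries at the loopback device on
 * NETDEV_REGISTER and releases them on NETDEV_UNREGISTER.
 * 0x0001 is NOTIFY_DONE.
 */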

static int ip6_route_dev_notify(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev);

if (!(dev->flags & IFF_LOOPBACK))
return 0x0001;

if (event == NETDEV_REGISTER) {
net->ipv6.fib6_null_entry->fib6_nh->nh_common.nhc_dev = dev;
net->ipv6.ip6_null_entry->dst.dev = dev;
net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);






} else if (event == NETDEV_UNREGISTER &&
dev->reg_state != NETREG_UNREGISTERED) {



in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);




}

return 0x0001;
}
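/* rt6_stats_seq_show(): backs /proc/net/rt6_stats. */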






static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
struct net *net = (struct net *)seq->private;
seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
net->ipv6.rt6_stats->fib_nodes,
net->ipv6.rt6_stats->fib_route_nodes,
atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
net->ipv6.rt6_stats->fib_rt_entries,
net->ipv6.rt6_stats->fib_rt_cache,
dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
net->ipv6.rt6_stats->fib_discarded_routes);

return 0;
}
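/* ipv6_sysctl_rtcache_flush(): write-only handler for the route-cache
 * "flush" sysctl; kicks fib6_run_gc() with the configured delay.
 * -22 is -EINVAL.
 */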




static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net;
int delay;
int ret;
if (!write)
return -22;

net = (struct net *)ctl->extra1;
delay = net->ipv6.sysctl.flush_delay;
ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (ret)
return ret;

fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
return 0;
}
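/* Template for the net.ipv6.route.* sysctl table; duplicated per
 * namespace by ipv6_route_sysctl_init() below.
 */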

static struct ctl_table ipv6_route_table_template[] = {
{
.procname = "max_size",
.data = &init_net.ipv6.sysctl.ip6_rt_max_size,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "gc_thresh",
.data = &ip6_dst_ops_template.gc_thresh,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "flush",
.data = &init_net.ipv6.sysctl.flush_delay,
.maxlen = sizeof(int),
.mode = 0200,
.proc_handler = ipv6_sysctl_rtcache_flush
},
{
.procname = "gc_min_interval",
.data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "gc_timeout",
.data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "gc_interval",
.data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "gc_elasticity",
.data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "mtu_expires",
.data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "min_adv_mss",
.data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "gc_min_interval_ms",
.data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
},
{
.procname = "skip_notify_on_dev_down",
.data = &init_net.ipv6.sysctl.skip_notify_on_dev_down,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = ((void *)&sysctl_vals[1]),
.extra2 = ((void *)&sysctl_vals[2]),
},
{ }
};
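/* Duplicate the template (the gfp literal is GFP_KERNEL) and repoint
 * each entry at the per-netns field; the table[N] indices must track
 * the template order above.
 */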

struct ctl_table * ipv6_route_sysctl_init(struct net *net)
{
struct ctl_table *table;

table = kmemdup(ipv6_route_table_template,
sizeof(ipv6_route_table_template),
((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u)));

if (table) {
table[0].data = &net->ipv6.sysctl.ip6_rt_max_size;
table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
table[2].data = &net->ipv6.sysctl.flush_delay;
table[2].extra1 = net;
table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;


if (net->user_ns != &init_user_ns)
table[1].procname = ((void *)0);
}

return table;
}
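/* ip6_route_net_init(): per-netns setup of the dst ops, the null
 * route entries and the sysctl defaults. Intervals are in jiffies
 * with HZ == 100 in this configuration; -12 is -ENOMEM.
 */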


static int ip6_route_net_init(struct net *net)
{
int ret = -12;

memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
sizeof(net->ipv6.ip6_dst_ops));

if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
goto out_ip6_dst_ops;

net->ipv6.fib6_null_entry = fib6_info_alloc(((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u)), true);
if (!net->ipv6.fib6_null_entry)
goto out_ip6_dst_entries;
memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
sizeof(*net->ipv6.fib6_null_entry));

net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
sizeof(*net->ipv6.ip6_null_entry),
((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u)));
if (!net->ipv6.ip6_null_entry)
goto out_fib6_null_entry;
net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
ip6_template_metrics, true);
INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->rt6i_uncached);
# 6503 "net/ipv6/route.c"
net->ipv6.sysctl.flush_delay = 0;
net->ipv6.sysctl.ip6_rt_max_size = 4096;
net->ipv6.sysctl.ip6_rt_gc_min_interval = 100 / 2;
net->ipv6.sysctl.ip6_rt_gc_timeout = 60*100;
net->ipv6.sysctl.ip6_rt_gc_interval = 30*100;
net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*100;
net->ipv6.sysctl.ip6_rt_min_advmss = 1280 - 20 - 40;
net->ipv6.sysctl.skip_notify_on_dev_down = 0;

atomic_set(&net->ipv6.ip6_rt_gc_expire, 30*100);

ret = 0;
out:
return ret;







out_fib6_null_entry:
kfree(net->ipv6.fib6_null_entry);
out_ip6_dst_entries:
dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
goto out;
}

static void ip6_route_net_exit(struct net *net)
{
kfree(net->ipv6.fib6_null_entry);
kfree(net->ipv6.ip6_null_entry);




dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}
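/* Late per-netns init/exit: create and remove /proc/net/ipv6_route
 * and /proc/net/rt6_stats.
 */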

static int ip6_route_net_init_late(struct net *net)
{

proc_create_net_data("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops, sizeof(struct ipv6_route_iter), ((void *)0));

proc_create_net_single("rt6_stats", 0444, net->proc_net,
rt6_stats_seq_show, ((void *)0));

return 0;
}

static void ip6_route_net_exit_late(struct net *net)
{

remove_proc_entry("ipv6_route", net->proc_net);
remove_proc_entry("rt6_stats", net->proc_net);

}

static struct pernet_operations ip6_route_net_ops = {
.init = ip6_route_net_init,
.exit = ip6_route_net_exit,
};

static int ipv6_inetpeer_init(struct net *net)
{
struct inet_peer_base *bp = kmalloc(sizeof(*bp), ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u)));

if (!bp)
return -12;
inet_peer_base_init(bp);
net->ipv6.peers = bp;
return 0;
}

static void ipv6_inetpeer_exit(struct net *net)
{
struct inet_peer_base *bp = net->ipv6.peers;

net->ipv6.peers = ((void *)0);
inetpeer_invalidate_tree(bp);
kfree(bp);
}

static struct pernet_operations ipv6_inetpeer_ops = {
.init = ipv6_inetpeer_init,
.exit = ipv6_inetpeer_exit,
};

static struct pernet_operations ip6_route_net_late_ops = {
.init = ip6_route_net_init_late,
.exit = ip6_route_net_exit_late,
};

static struct notifier_block ip6_route_dev_notifier = {
.notifier_call = ip6_route_dev_notify,
.priority = 0 - 10,
};
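/* ip6_route_init_special_entries(): wire init_net's null entries to
 * the loopback device; the .init.text/__cold attributes are the
 * expanded __init annotation.
 */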

void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) ip6_route_init_special_entries(void)
{



init_net.ipv6.fib6_null_entry->fib6_nh->nh_common.nhc_dev = init_net.loopback_dev;
init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);






}
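/* BPF iterator plumbing for the "ipv6_route" target; the stub below
 * looks like the expanded DEFINE_BPF_ITER_FUNC(ipv6_route, ...).
 */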



extern int bpf_iter_ipv6_route(struct bpf_iter_meta *meta, struct fib6_info *rt); int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) bpf_iter_ipv6_route(struct bpf_iter_meta *meta, struct fib6_info *rt) { return 0; }

static u32 __attribute__((__unused__)) btf_fib6_info_id[5];


static const struct bpf_iter_seq_info ipv6_route_seq_info = {
.seq_ops = &ipv6_route_seq_ops,
.init_seq_private = bpf_iter_init_seq_net,
.fini_seq_private = bpf_iter_fini_seq_net,
.seq_priv_size = sizeof(struct ipv6_route_iter),
};

static struct bpf_iter_reg ipv6_route_reg_info = {
.target = "ipv6_route",
.ctx_arg_info_size = 1,
.ctx_arg_info = {
{ __builtin_offsetof(struct bpf_iter__ipv6_route, rt),
PTR_TO_BTF_ID_OR_NULL },
},
.seq_info = &ipv6_route_seq_info,
};

static int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) bpf_iter_register(void)
{
ipv6_route_reg_info.ctx_arg_info[0].btf_id = *btf_fib6_info_id;
return bpf_iter_reg_target(&ipv6_route_reg_info);
}

static void bpf_iter_unregister(void)
{
bpf_iter_unreg_target(&ipv6_route_reg_info);
}
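/* ip6_route_init(): subsystem init. Registers the dst kmem cache,
 * pernet subsystems, the three rtnetlink handlers (10 == PF_INET6),
 * the netdev notifier and the BPF iterator, then initializes the
 * per-cpu rt6_uncached_list in the expanded for_each_possible_cpu()/
 * per_cpu_ptr() loop near the bottom. -12 is -ENOMEM.
 */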



int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__no_sanitize__("cfi"))) ip6_route_init(void)
{
int ret;
int cpu;

ret = -12;
ip6_dst_ops_template.kmem_cachep =
kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
((slab_flags_t )0x00002000U) | 0, ((void *)0));
if (!ip6_dst_ops_template.kmem_cachep)
goto out;

ret = dst_entries_init(&ip6_dst_blackhole_ops);
if (ret)
goto out_kmem_cache;

ret = register_pernet_subsys(&ipv6_inetpeer_ops);
if (ret)
goto out_dst_entries;

ret = register_pernet_subsys(&ip6_route_net_ops);
if (ret)
goto out_register_inetpeer;

ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

ret = fib6_init();
if (ret)
goto out_register_subsys;

ret = xfrm6_init();
if (ret)
goto out_fib6_init;

ret = fib6_rules_init();
if (ret)
goto xfrm6_init;

ret = register_pernet_subsys(&ip6_route_net_late_ops);
if (ret)
goto fib6_rules_init;

ret = rtnl_register_module(((struct module *)0), 10, RTM_NEWROUTE,
inet6_rtm_newroute, ((void *)0), 0);
if (ret < 0)
goto out_register_late_subsys;

ret = rtnl_register_module(((struct module *)0), 10, RTM_DELROUTE,
inet6_rtm_delroute, ((void *)0), 0);
if (ret < 0)
goto out_register_late_subsys;

ret = rtnl_register_module(((struct module *)0), 10, RTM_GETROUTE,
inet6_rtm_getroute, ((void *)0),
RTNL_FLAG_DOIT_UNLOCKED);
if (ret < 0)
goto out_register_late_subsys;

ret = register_netdevice_notifier(&ip6_route_dev_notifier);
if (ret)
goto out_register_late_subsys;



ret = bpf_iter_register();
if (ret)
goto out_register_late_subsys;



for (((cpu)) = -1; ((cpu)) = cpumask_next(((cpu)), (((const struct cpumask *)&__cpu_possible_mask))), ((cpu)) < nr_cpu_ids;) {
struct uncached_list *ul = ({ do { const void *__vpp_verify = (typeof((&rt6_uncached_list) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __ptr = (unsigned long) ((typeof(*((&rt6_uncached_list))) *)((&rt6_uncached_list))); (typeof((typeof(*((&rt6_uncached_list))) *)((&rt6_uncached_list)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); });

INIT_LIST_HEAD(&ul->head);
INIT_LIST_HEAD(&ul->quarantine);
do { static struct lock_class_key __key; __raw_spin_lock_init(spinlock_check(&ul->lock), "&ul->lock", &__key, LD_WAIT_CONFIG); } while (0);
}

out:
return ret;

out_register_late_subsys:
rtnl_unregister_all(10);
unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
fib6_rules_cleanup();
xfrm6_init:
xfrm6_fini();
out_fib6_init:
fib6_gc_cleanup();
out_register_subsys:
unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
goto out;
}
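/* ip6_route_cleanup(): tear everything down in reverse order of
 * ip6_route_init().
 */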

void ip6_route_cleanup(void)
{


bpf_iter_unregister();


unregister_netdevice_notifier(&ip6_route_dev_notifier);
unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_cleanup();
xfrm6_fini();
fib6_gc_cleanup();
unregister_pernet_subsys(&ipv6_inetpeer_ops);
unregister_pernet_subsys(&ip6_route_net_ops);
dst_entries_destroy(&ip6_dst_blackhole_ops);
kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}