diff --git a/PKGBUILD b/PKGBUILD index d31542e..c45963c 100644 --- a/PKGBUILD +++ b/PKGBUILD @@ -54,7 +54,7 @@ else fi pkgname=("${pkgbase}" "${pkgbase}-headers") pkgver="${_basekernel}"."${_sub}" -pkgrel=153 +pkgrel=154 pkgdesc='Linux-tkg' arch=('x86_64') # no i686 in here url="http://www.kernel.org/" @@ -465,6 +465,55 @@ case $_basever in 'b1c6599d0e1ac9b66898d652ed99dae3fb8676d840a43ffa920a78d96e0521be' 'b0319a7dff9c48b2f3e3d3597ee154bf92223149a633a8b7ce4026252db86da6') ;; + 513) + opt_ver="5.8%2B" + source=("$kernel_site" + #"$patch_site" + "https://raw.githubusercontent.com/graysky2/kernel_gcc_patch/master/more-uarches-for-kernel-5.8%2B.patch" + 'config.x86_64' # stock Arch config + #'config_hardened.x86_64' # hardened Arch config + 90-cleanup.hook + cleanup + # ARCH Patches + 0001-add-sysctl-to-disallow-unprivileged-CLONE_NEWUSER-by.patch + # TkG + 0002-clear-patches.patch + 0003-glitched-base.patch + 0003-glitched-cfs.patch + 0005-glitched-pds.patch + 0006-add-acs-overrides_iommu.patch + #0007-v5.13-fsync.patch + #0007-v5.13-futex2_interface.patch + 0007-v5.13-winesync.patch + #0008-5.13-bcachefs.patch + 0009-glitched-ondemand-bmq.patch + 0009-glitched-bmq.patch + 0009-prjc_v5.13-r0.patch + #0012-linux-hardened.patch + 0012-misc-additions.patch + # MM Dirty Soft for WRITE_WATCH support in Wine + 0001-mm-Support-soft-dirty-flag-reset-for-VA-range.patch + 0002-mm-Support-soft-dirty-flag-read-with-reset.patch + ) + sha256sums=('5571c00e27a12705ae6b7dddf7e4b80e8401e19527c8b91ac97b1fdd9aa1d0e4' + 'SKIP' + 'dbe038aaf194d7b77f4ca687f0c985468e0fb929863e23e801833410a09cc592' + '1e15fc2ef3fa770217ecc63a220e5df2ddbcf3295eb4a021171e7edd4c6cc898' + '66a03c246037451a77b4d448565b1d7e9368270c7d02872fbd0b5d024ed0a997' + 'f6383abef027fd9a430fd33415355e0df492cdc3c90e9938bf2d98f4f63b32e6' + '35a7cde86fb94939c0f25a62b8c47f3de0dbd3c65f876f460b263181b3e92fc0' + 'ef80c354d08f63363d36485b1b77b15f3d36cad1e00edbe13ba89538fbb38146' + '7058e57fd68367b029adc77f2a82928f1433daaf02c8c279cb2d13556c8804d7' + 'fca63d15ca4502aebd73e76d7499b243d2c03db71ff5ab0bf5cf268b2e576320' + '19661ec0d39f9663452b34433214c755179894528bf73a42f6ba52ccf572832a' + '034d12a73b507133da2c69a34d61efd2f6b6618549650aa26d748142d22002e1' + '9fad4a40449e09522899955762c8928ae17f4cdaa16e01239fd12592e9d58177' + 'a557b342111849a5f920bbe1c129f3ff1fc1eff62c6bd6685e0972fc88e39911' + '741d45b45ee20dab3033e1e074266129d27a7049b14fe62949e2918375bd40c4' + '7fb1104c167edb79ec8fbdcde97940ed0f806aa978bdd14d0c665a1d76d25c24' + 'b1c6599d0e1ac9b66898d652ed99dae3fb8676d840a43ffa920a78d96e0521be' + 'b0319a7dff9c48b2f3e3d3597ee154bf92223149a633a8b7ce4026252db86da6') + ;; esac export KBUILD_BUILD_HOST=archlinux diff --git a/linux-tkg-config/5.13/90-cleanup.hook b/linux-tkg-config/5.13/90-cleanup.hook new file mode 100644 index 0000000..99f5221 --- /dev/null +++ b/linux-tkg-config/5.13/90-cleanup.hook @@ -0,0 +1,14 @@ +[Trigger] +Type = File +Operation = Install +Operation = Upgrade +Operation = Remove +Target = usr/lib/modules/*/ +Target = !usr/lib/modules/*/?* + +[Action] +Description = Cleaning up... +When = PostTransaction +Exec = /usr/share/libalpm/scripts/cleanup +NeedsTargets + diff --git a/linux-tkg-config/5.13/cleanup b/linux-tkg-config/5.13/cleanup new file mode 100755 index 0000000..c00c08d --- /dev/null +++ b/linux-tkg-config/5.13/cleanup @@ -0,0 +1,10 @@ +#!/bin/bash + +for _f in /usr/lib/modules/*tkg*; do + if [[ ! 
-e ${_f}/vmlinuz ]]; then + rm -rf "$_f" + fi +done + +# vim:set ft=sh sw=2 et: + diff --git a/linux-tkg-config/5.13/config.x86_64 b/linux-tkg-config/5.13/config.x86_64 new file mode 100644 index 0000000..ad0ead7 --- /dev/null +++ b/linux-tkg-config/5.13/config.x86_64 @@ -0,0 +1,10526 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/x86 5.13.0-rc1 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (GCC) 10.2.0" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=100200 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=23601 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=23601 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_HAVE_KERNEL_ZSTD=y +# CONFIG_KERNEL_GZIP is not set +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_KERNEL_ZSTD=y +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="archlinux" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_WATCH_QUEUE=y +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_SIM=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_INIT=y +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_PREEMPT_COUNT=y +CONFIG_PREEMPTION=y +CONFIG_PREEMPT_DYNAMIC=y + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +# CONFIG_PSI_DEFAULT_DISABLED is not set +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# 
+CONFIG_TREE_RCU=y +CONFIG_PREEMPT_RCU=y +CONFIG_RCU_EXPERT=y +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RCU=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_RCU_FANOUT=64 +CONFIG_RCU_FANOUT_LEAF=16 +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_DELAY=500 +# CONFIG_RCU_NOCB_CPU is not set +# CONFIG_TASKS_TRACE_RCU_READ_MB is not set +# end of RCU Subsystem + +CONFIG_BUILD_BIN2C=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=m +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y + +# +# Scheduler features +# +CONFIG_UCLAMP_TASK=y +CONFIG_UCLAMP_BUCKETS_COUNT=5 +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_CC_HAS_INT128=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_UCLAMP_TASK_GROUP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_USER_NS_UNPRIVILEGED=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +CONFIG_BOOT_CONFIG=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +CONFIG_BPF=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_HAVE_ARCH_USERFAULTFD_WP=y +CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_LSM=y +CONFIG_BPF_SYSCALL=y +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_JIT_DEFAULT_ON=y +CONFIG_USERMODE_DRIVER=y +CONFIG_BPF_PRELOAD=y +CONFIG_BPF_PRELOAD_UMD=m +CONFIG_USERFAULTFD=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# 
CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +CONFIG_SLAB_MERGE_DEFAULT=y +CONFIG_SLAB_FREELIST_RANDOM=y +CONFIG_SLAB_FREELIST_HARDENED=y +CONFIG_SHUFFLE_PAGE_ALLOCATOR=y +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +# end of General setup + +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HAS_FILTER_PGPROT=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ZONE_DMA32=y +CONFIG_AUDIT_ARCH=y +CONFIG_HAVE_INTEL_TXT=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_DYNAMIC_PHYSICAL_MASK=y +CONFIG_PGTABLE_LEVELS=5 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y + +# +# Processor type and features +# +CONFIG_ZONE_DMA=y +CONFIG_SMP=y +CONFIG_X86_FEATURE_NAMES=y +CONFIG_X86_X2APIC=y +CONFIG_X86_MPPARSE=y +# CONFIG_GOLDFISH is not set +CONFIG_RETPOLINE=y +CONFIG_X86_CPU_RESCTRL=y +# CONFIG_X86_EXTENDED_PLATFORM is not set +CONFIG_X86_INTEL_LPSS=y +CONFIG_X86_AMD_PLATFORM_DEVICE=y +CONFIG_IOSF_MBI=y +# CONFIG_IOSF_MBI_DEBUG is not set +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_XXL=y +# CONFIG_PARAVIRT_DEBUG is not set +CONFIG_PARAVIRT_SPINLOCKS=y +CONFIG_X86_HV_CALLBACK_VECTOR=y +CONFIG_XEN=y +CONFIG_XEN_PV=y +CONFIG_XEN_512GB=y +CONFIG_XEN_PV_SMP=y +CONFIG_XEN_DOM0=y +CONFIG_XEN_PVHVM=y +CONFIG_XEN_PVHVM_SMP=y +CONFIG_XEN_PVHVM_GUEST=y +CONFIG_XEN_SAVE_RESTORE=y +# CONFIG_XEN_DEBUG_FS is not set +CONFIG_XEN_PVH=y +CONFIG_KVM_GUEST=y +CONFIG_ARCH_CPUIDLE_HALTPOLL=y +CONFIG_PVH=y +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_PARAVIRT_CLOCK=y +CONFIG_JAILHOUSE_GUEST=y +CONFIG_ACRN_GUEST=y +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y +CONFIG_IA32_FEAT_CTL=y +CONFIG_X86_VMX_FEATURE_NAMES=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_HYGON=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_ZHAOXIN=y +CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +# CONFIG_GART_IOMMU is not set +# CONFIG_MAXSMP is not set +CONFIG_NR_CPUS_RANGE_BEGIN=2 +CONFIG_NR_CPUS_RANGE_END=512 +CONFIG_NR_CPUS_DEFAULT=64 +CONFIG_NR_CPUS=320 +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCE=y +# CONFIG_X86_MCELOG_LEGACY is not set +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y +CONFIG_X86_MCE_THRESHOLD=y +CONFIG_X86_MCE_INJECT=m + +# +# Performance monitoring +# +CONFIG_PERF_EVENTS_INTEL_UNCORE=m +CONFIG_PERF_EVENTS_INTEL_RAPL=m +CONFIG_PERF_EVENTS_INTEL_CSTATE=m 
+CONFIG_PERF_EVENTS_AMD_POWER=m +# end of Performance monitoring + +CONFIG_X86_16BIT=y +CONFIG_X86_ESPFIX64=y +CONFIG_X86_VSYSCALL_EMULATION=y +CONFIG_X86_IOPL_IOPERM=y +CONFIG_I8K=m +CONFIG_MICROCODE=y +CONFIG_MICROCODE_INTEL=y +CONFIG_MICROCODE_AMD=y +# CONFIG_MICROCODE_OLD_INTERFACE is not set +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_X86_5LEVEL=y +CONFIG_X86_DIRECT_GBPAGES=y +CONFIG_X86_CPA_STATISTICS=y +CONFIG_AMD_MEM_ENCRYPT=y +# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set +CONFIG_NUMA=y +CONFIG_AMD_NUMA=y +CONFIG_X86_64_ACPI_NUMA=y +# CONFIG_NUMA_EMU is not set +CONFIG_NODES_SHIFT=5 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +# CONFIG_ARCH_MEMORY_PROBE is not set +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_X86_PMEM_LEGACY_DEVICE=y +CONFIG_X86_PMEM_LEGACY=m +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y +CONFIG_X86_RESERVE_LOW=64 +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=0 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_ARCH_RANDOM=y +CONFIG_X86_SMAP=y +CONFIG_X86_UMIP=y +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y +# CONFIG_X86_INTEL_TSX_MODE_OFF is not set +# CONFIG_X86_INTEL_TSX_MODE_ON is not set +CONFIG_X86_INTEL_TSX_MODE_AUTO=y +CONFIG_X86_SGX=y +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_EFI_MIXED=y +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set +CONFIG_HZ_300=y +# CONFIG_HZ_1000 is not set +CONFIG_HZ=300 +CONFIG_SCHED_HRTICK=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_ARCH_HAS_KEXEC_PURGATORY=y +# CONFIG_KEXEC_SIG is not set +CONFIG_CRASH_DUMP=y +CONFIG_KEXEC_JUMP=y +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_X86_NEED_RELOCS=y +CONFIG_PHYSICAL_ALIGN=0x200000 +CONFIG_DYNAMIC_MEMORY_LAYOUT=y +CONFIG_RANDOMIZE_MEMORY=y +CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa +CONFIG_HOTPLUG_CPU=y +# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set +# CONFIG_DEBUG_HOTPLUG_CPU0 is not set +# CONFIG_COMPAT_VDSO is not set +# CONFIG_LEGACY_VSYSCALL_EMULATE is not set +CONFIG_LEGACY_VSYSCALL_XONLY=y +# CONFIG_LEGACY_VSYSCALL_NONE is not set +# CONFIG_CMDLINE_BOOL is not set +CONFIG_MODIFY_LDT_SYSCALL=y +CONFIG_HAVE_LIVEPATCH=y +# CONFIG_LIVEPATCH is not set +# end of Processor type and features + +CONFIG_ARCH_HAS_ADD_PAGES=y +CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y + +# +# Power management and ACPI options +# +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +# CONFIG_PM_ADVANCED_DEBUG is not set +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +CONFIG_PM_TRACE=y +CONFIG_PM_TRACE_RTC=y +CONFIG_PM_CLK=y +CONFIG_PM_GENERIC_DOMAINS=y +CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +CONFIG_ENERGY_MODEL=y +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_FPDT=y +CONFIG_ACPI_LPIT=y +CONFIG_ACPI_SLEEP=y +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y +CONFIG_ACPI_EC_DEBUGFS=m +CONFIG_ACPI_AC=y 
+CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=m +CONFIG_ACPI_FAN=y +CONFIG_ACPI_TAD=m +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_CSTATE=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_PLATFORM_PROFILE=m +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_DEBUG=y +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HOTPLUG_IOAPIC=y +CONFIG_ACPI_SBS=m +CONFIG_ACPI_HED=y +CONFIG_ACPI_CUSTOM_METHOD=m +CONFIG_ACPI_BGRT=y +CONFIG_ACPI_NFIT=m +# CONFIG_NFIT_SECURITY_DEBUG is not set +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_HMAT=y +CONFIG_HAVE_ACPI_APEI=y +CONFIG_HAVE_ACPI_APEI_NMI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +CONFIG_ACPI_APEI_ERST_DEBUG=m +CONFIG_ACPI_DPTF=y +CONFIG_DPTF_POWER=m +CONFIG_DPTF_PCH_FIVR=m +CONFIG_ACPI_WATCHDOG=y +CONFIG_ACPI_EXTLOG=m +CONFIG_ACPI_ADXL=y +CONFIG_ACPI_CONFIGFS=m +CONFIG_PMIC_OPREGION=y +CONFIG_BYTCRC_PMIC_OPREGION=y +CONFIG_CHTCRC_PMIC_OPREGION=y +CONFIG_XPOWER_PMIC_OPREGION=y +CONFIG_BXT_WC_PMIC_OPREGION=y +CONFIG_CHT_WC_PMIC_OPREGION=y +CONFIG_CHT_DC_TI_PMIC_OPREGION=y +CONFIG_TPS68470_PMIC_OPREGION=y +CONFIG_X86_PM_TIMER=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y + +# +# CPU frequency scaling drivers +# +CONFIG_X86_INTEL_PSTATE=y +CONFIG_X86_PCC_CPUFREQ=m +CONFIG_X86_ACPI_CPUFREQ=m +CONFIG_X86_ACPI_CPUFREQ_CPB=y +CONFIG_X86_POWERNOW_K8=m +CONFIG_X86_AMD_FREQ_SENSITIVITY=m +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set +CONFIG_X86_P4_CLOCKMOD=m + +# +# shared options +# +CONFIG_X86_SPEEDSTEP_LIB=m +# end of CPU Frequency scaling + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_CPU_IDLE_GOV_TEO=y +CONFIG_CPU_IDLE_GOV_HALTPOLL=y +CONFIG_HALTPOLL_CPUIDLE=m +# end of CPU Idle + +CONFIG_INTEL_IDLE=y +# end of Power management and ACPI options + +# +# Bus options (PCI etc.) +# +CONFIG_PCI_DIRECT=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_XEN=y +CONFIG_MMCONF_FAM10H=y +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y +# CONFIG_X86_SYSFB is not set +# end of Bus options (PCI etc.) 
+ +# +# Binary Emulations +# +CONFIG_IA32_EMULATION=y +# CONFIG_X86_X32 is not set +CONFIG_COMPAT_32=y +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +CONFIG_SYSVIPC_COMPAT=y +# end of Binary Emulations + +# +# Firmware Drivers +# +CONFIG_EDD=m +# CONFIG_EDD_OFF is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_ISCSI_IBFT=m +CONFIG_FW_CFG_SYSFS=m +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set +CONFIG_GOOGLE_FIRMWARE=y +# CONFIG_GOOGLE_SMI is not set +CONFIG_GOOGLE_COREBOOT_TABLE=m +CONFIG_GOOGLE_MEMCONSOLE=m +# CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY is not set +CONFIG_GOOGLE_FRAMEBUFFER_COREBOOT=m +CONFIG_GOOGLE_MEMCONSOLE_COREBOOT=m +CONFIG_GOOGLE_VPD=m + +# +# EFI (Extensible Firmware Interface) Support +# +# CONFIG_EFI_VARS is not set +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=y +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_RUNTIME_MAP=y +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_SOFT_RESERVE=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y +CONFIG_EFI_BOOTLOADER_CONTROL=m +CONFIG_EFI_CAPSULE_LOADER=m +# CONFIG_EFI_TEST is not set +CONFIG_APPLE_PROPERTIES=y +# CONFIG_RESET_ATTACK_MITIGATION is not set +CONFIG_EFI_RCI2_TABLE=y +# CONFIG_EFI_DISABLE_PCI_DMA is not set +# end of EFI (Extensible Firmware Interface) Support + +CONFIG_EFI_EMBEDDED_FIRMWARE=y +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_X86=y +CONFIG_EFI_DEV_PATH_PARSER=y +CONFIG_EFI_EARLYCON=y +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_ASYNC_PF=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_COMPAT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_NO_POLL=y +CONFIG_KVM_XFER_TO_GUEST_WORK=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_INTEL=m +# CONFIG_X86_SGX_KVM is not set +CONFIG_KVM_AMD=m +CONFIG_KVM_AMD_SEV=y +CONFIG_KVM_XEN=y +CONFIG_KVM_MMU_AUDIT=y +CONFIG_AS_AVX512=y +CONFIG_AS_SHA1_NI=y +CONFIG_AS_SHA256_NI=y +CONFIG_AS_TPAUSE=y + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HOTPLUG_SMT=y +CONFIG_GENERIC_ENTRY=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +# CONFIG_STATIC_CALL_SELFTEST is not set +CONFIG_OPTPROBES=y +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y 
+CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_TABLE_FREE=y +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PUD=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y +CONFIG_HAVE_STACK_VALIDATION=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_ISA_BUS_API=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y +# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +CONFIG_LOCK_EVENT_COUNTS=y +CONFIG_ARCH_HAS_MEM_ENCRYPT=y +CONFIG_HAVE_STATIC_CALL=y +CONFIG_HAVE_STATIC_CALL_INLINE=y +CONFIG_HAVE_PREEMPT_DYNAMIC=y +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_HAS_ELFCORE_COMPAT=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +# CONFIG_GCC_PLUGIN_RANDSTRUCT is not set +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULE_SIG_FORMAT=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +# CONFIG_MODULE_SIG_SHA256 is not set +# CONFIG_MODULE_SIG_SHA384 is not set +CONFIG_MODULE_SIG_SHA512=y +CONFIG_MODULE_SIG_HASH="sha512" +# CONFIG_MODULE_COMPRESS_NONE is not set +# CONFIG_MODULE_COMPRESS_GZIP is not set +# CONFIG_MODULE_COMPRESS_XZ is not set +CONFIG_MODULE_COMPRESS_ZSTD=y +CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS=y +CONFIG_MODPROBE_PATH="/sbin/modprobe" +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_RQ_ALLOC_TIME=y 
+CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_DEV_THROTTLING_LOW=y +# CONFIG_BLK_CMDLINE_PARSER is not set +CONFIG_BLK_WBT=y +CONFIG_BLK_CGROUP_IOLATENCY=y +CONFIG_BLK_CGROUP_IOCOST=y +CONFIG_BLK_WBT_MQ=y +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +CONFIG_BLK_SED_OPAL=y +CONFIG_BLK_INLINE_ENCRYPTION=y +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +CONFIG_AIX_PARTITION=y +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +# CONFIG_UNIXWARE_DISKLABEL is not set +CONFIG_LDM_PARTITION=y +# CONFIG_LDM_DEBUG is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_MQ_RDMA=y +CONFIG_BLK_PM=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=y +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_NUMA_KEEP_MEMINFO=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_MHP_MEMMAP_ON_MEMORY=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_PAGE_REPORTING=y +CONFIG_MIGRATION=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_VIRT_TO_BUS=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_TRANSPARENT_HUGEPAGE=y +# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set +CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_THP_SWAP=y +CONFIG_CLEANCACHE=y 
+CONFIG_FRONTSWAP=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +CONFIG_CMA_DEBUGFS=y +# CONFIG_CMA_SYSFS is not set +CONFIG_CMA_AREAS=7 +CONFIG_MEM_SOFT_DIRTY=y +CONFIG_ZSWAP=y +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4=y +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lz4" +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD is not set +CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD=y +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set +CONFIG_ZSWAP_ZPOOL_DEFAULT="z3fold" +CONFIG_ZSWAP_DEFAULT_ON=y +CONFIG_ZPOOL=y +CONFIG_ZBUD=y +CONFIG_Z3FOLD=y +CONFIG_ZSMALLOC=y +# CONFIG_ZSMALLOC_STAT is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_PTE_DEVMAP=y +CONFIG_ZONE_DEVICE=y +CONFIG_DEV_PAGEMAP_OPS=y +CONFIG_HMM_MIRROR=y +CONFIG_DEVICE_PRIVATE=y +CONFIG_VMAP_PFN=y +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_HAS_PKEYS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_TEST is not set +CONFIG_READ_ONLY_THP_FOR_FS=y +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_MAPPING_DIRTY_HELPERS=y +CONFIG_IO_MAPPING=y +# end of Memory Management options + +CONFIG_NET=y +CONFIG_COMPAT_NETLINK_MESSAGES=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +# CONFIG_TLS_TOE is not set +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_USER_COMPAT is not set +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_AH=m +CONFIG_XFRM_ESP=m +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_XFRM_ESPINTCP=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +CONFIG_NET_FOU=m +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_ESPINTCP=y +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +CONFIG_INET_DIAG_DESTROY=y +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y 
+CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_ESPINTCP=y +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_FOU=m +CONFIG_IPV6_FOU_TUNNEL=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_IPV6_SEG6_BPF=y +CONFIG_IPV6_RPL_LWTUNNEL=y +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_INET_MPTCP_DIAG=m +CONFIG_MPTCP_IPV6=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_SYSLOG=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NFT_REJECT_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=m +CONFIG_NETFILTER_XTABLES_COMPAT=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m 
+CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=15 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m 
+CONFIG_IP_VS_NQ=m +CONFIG_IP_VS_TWOS=m + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_FLOW_TABLE_IPV4=m +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_FLOW_TABLE_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_MATCH_SRH=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_CONNTRACK_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +CONFIG_RDS=m +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m +# CONFIG_RDS_DEBUG is not set +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_CRYPTO=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m 
+CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_BRIDGE_MRP=y +CONFIG_BRIDGE_CFM=y +CONFIG_NET_DSA=m +CONFIG_NET_DSA_TAG_8021Q=m +CONFIG_NET_DSA_TAG_AR9331=m +CONFIG_NET_DSA_TAG_BRCM_COMMON=m +CONFIG_NET_DSA_TAG_BRCM=m +CONFIG_NET_DSA_TAG_BRCM_LEGACY=m +CONFIG_NET_DSA_TAG_BRCM_PREPEND=m +CONFIG_NET_DSA_TAG_HELLCREEK=m +CONFIG_NET_DSA_TAG_GSWIP=m +CONFIG_NET_DSA_TAG_DSA_COMMON=m +CONFIG_NET_DSA_TAG_DSA=m +CONFIG_NET_DSA_TAG_EDSA=m +CONFIG_NET_DSA_TAG_MTK=m +CONFIG_NET_DSA_TAG_KSZ=m +CONFIG_NET_DSA_TAG_RTL4_A=m +CONFIG_NET_DSA_TAG_OCELOT=m +CONFIG_NET_DSA_TAG_OCELOT_8021Q=m +CONFIG_NET_DSA_TAG_QCA=m +CONFIG_NET_DSA_TAG_LAN9303=m +CONFIG_NET_DSA_TAG_SJA1105=m +CONFIG_NET_DSA_TAG_TRAILER=m +CONFIG_NET_DSA_TAG_XRS700X=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +# CONFIG_DECNET is not set +CONFIG_LLC=m +CONFIG_LLC2=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +CONFIG_PHONET=m +CONFIG_6LOWPAN=m +CONFIG_6LOWPAN_DEBUGFS=y +CONFIG_6LOWPAN_NHC=m +CONFIG_6LOWPAN_NHC_DEST=m +CONFIG_6LOWPAN_NHC_FRAGMENT=m +CONFIG_6LOWPAN_NHC_HOP=m +CONFIG_6LOWPAN_NHC_IPV6=m +CONFIG_6LOWPAN_NHC_MOBILITY=m +CONFIG_6LOWPAN_NHC_ROUTING=m +CONFIG_6LOWPAN_NHC_UDP=m +CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m +CONFIG_6LOWPAN_GHC_UDP=m +CONFIG_6LOWPAN_GHC_ICMPV6=m +CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m +CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m +CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m +CONFIG_IEEE802154=m +CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_ATM=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_TAPRIO=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_FQ_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_ETS=m +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_FQ_PIE is not set +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_CANID=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_EMATCH_IPT=m +CONFIG_NET_CLS_ACT=y 
+CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_MPLS=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +CONFIG_NET_ACT_CTINFO=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +CONFIG_NET_ACT_GATE=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_NET_TC_SKB_EXT=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=m +CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_BATMAN_V=y +CONFIG_BATMAN_ADV_BLA=y +CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y +# CONFIG_BATMAN_ADV_DEBUG is not set +# CONFIG_BATMAN_ADV_TRACING is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m +CONFIG_VMWARE_VMCI_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_HYPERV_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=m +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=m +CONFIG_HSR=m +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +CONFIG_QRTR=m +CONFIG_QRTR_SMD=m +CONFIG_QRTR_TUN=m +CONFIG_QRTR_MHI=m +CONFIG_NET_NCSI=y +CONFIG_NCSI_OEM_CMD_GET_MAC=y +CONFIG_PCPU_DEV_REFCNT=y +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_SOCK_RX_QUEUE_MAPPING=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# end of Network testing +# end of Networking options + +CONFIG_HAMRADIO=y + +# +# Packet Radio protocols +# +CONFIG_AX25=m +CONFIG_AX25_DAMA_SLAVE=y +CONFIG_NETROM=m +CONFIG_ROSE=m + +# +# AX.25 network device drivers +# +CONFIG_MKISS=m +CONFIG_6PACK=m +CONFIG_BPQETHER=m +CONFIG_BAYCOM_SER_FDX=m +CONFIG_BAYCOM_SER_HDX=m +CONFIG_BAYCOM_PAR=m +CONFIG_YAM=m +# end of AX.25 network device drivers + +CONFIG_CAN=m +CONFIG_CAN_RAW=m +CONFIG_CAN_BCM=m +CONFIG_CAN_GW=m +CONFIG_CAN_J1939=m +CONFIG_CAN_ISOTP=m + +# +# CAN Device Drivers +# +CONFIG_CAN_VCAN=m +CONFIG_CAN_VXCAN=m +CONFIG_CAN_SLCAN=m +CONFIG_CAN_DEV=m +CONFIG_CAN_CALC_BITTIMING=y +CONFIG_CAN_JANZ_ICAN3=m +CONFIG_CAN_KVASER_PCIEFD=m +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +# CONFIG_CAN_CC770_ISA is not set +CONFIG_CAN_CC770_PLATFORM=m +CONFIG_CAN_IFI_CANFD=m +CONFIG_CAN_M_CAN=m +CONFIG_CAN_M_CAN_PCI=m +CONFIG_CAN_M_CAN_PLATFORM=m +CONFIG_CAN_M_CAN_TCAN4X5X=m +CONFIG_CAN_PEAK_PCIEFD=m +CONFIG_CAN_SJA1000=m +CONFIG_CAN_EMS_PCI=m +# CONFIG_CAN_EMS_PCMCIA is not set +CONFIG_CAN_F81601=m +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PEAK_PCIEC=y +CONFIG_CAN_PEAK_PCMCIA=m +CONFIG_CAN_PLX_PCI=m +# CONFIG_CAN_SJA1000_ISA is not set +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_SOFTING=m +CONFIG_CAN_SOFTING_CS=m + +# +# CAN SPI interfaces +# +CONFIG_CAN_HI311X=m +CONFIG_CAN_MCP251X=m +CONFIG_CAN_MCP251XFD=m +# CONFIG_CAN_MCP251XFD_SANITY is not set +# end of CAN SPI interfaces + +# +# CAN USB interfaces +# +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_ESD_USB2=m +# CONFIG_CAN_ETAS_ES58X is not set +CONFIG_CAN_GS_USB=m +CONFIG_CAN_KVASER_USB=m 
+CONFIG_CAN_MCBA_USB=m +CONFIG_CAN_PEAK_USB=m +CONFIG_CAN_UCAN=m +# end of CAN USB interfaces + +# CONFIG_CAN_DEBUG_DEVICES is not set +# end of CAN Device Drivers + +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m +CONFIG_BT_HIDP=m +# CONFIG_BT_HS is not set +CONFIG_BT_LE=y +CONFIG_BT_6LOWPAN=m +CONFIG_BT_LEDS=y +CONFIG_BT_MSFTEXT=y +# CONFIG_BT_AOSPEXT is not set +CONFIG_BT_DEBUGFS=y +# CONFIG_BT_SELFTEST is not set + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_BCM=m +CONFIG_BT_RTL=m +CONFIG_BT_QCA=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +CONFIG_BT_HCIBTUSB_BCM=y +CONFIG_BT_HCIBTUSB_MTK=y +CONFIG_BT_HCIBTUSB_RTL=y +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_SERDEV=y +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_NOKIA=m +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +CONFIG_BT_HCIUART_LL=y +CONFIG_BT_HCIUART_3WIRE=y +CONFIG_BT_HCIUART_INTEL=y +CONFIG_BT_HCIUART_BCM=y +CONFIG_BT_HCIUART_RTL=y +CONFIG_BT_HCIUART_QCA=y +CONFIG_BT_HCIUART_AG6XX=y +CONFIG_BT_HCIUART_MRVL=y +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIDTL1=m +CONFIG_BT_HCIBT3C=m +CONFIG_BT_HCIBLUECARD=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +CONFIG_BT_MTKSDIO=m +CONFIG_BT_MTKUART=m +CONFIG_BT_HCIRSI=m +# CONFIG_BT_VIRTIO is not set +# end of Bluetooth device drivers + +CONFIG_AF_RXRPC=m +CONFIG_AF_RXRPC_IPV6=y +# CONFIG_AF_RXRPC_INJECT_LOSS is not set +CONFIG_AF_RXRPC_DEBUG=y +CONFIG_RXKAD=y +CONFIG_AF_KCM=m +CONFIG_STREAM_PARSER=y +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_SPY=y +CONFIG_WEXT_PRIV=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +CONFIG_CFG80211_DEBUGFS=y +CONFIG_CFG80211_CRDA_SUPPORT=y +CONFIG_CFG80211_WEXT=y +CONFIG_CFG80211_WEXT_EXPORT=y +CONFIG_LIB80211=m +CONFIG_LIB80211_CRYPT_WEP=m +CONFIG_LIB80211_CRYPT_CCMP=m +CONFIG_LIB80211_CRYPT_TKIP=m +# CONFIG_LIB80211_DEBUG is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +CONFIG_MAC80211_MESH=y +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_GPIO=m +CONFIG_NET_9P=m +CONFIG_NET_9P_VIRTIO=m +CONFIG_NET_9P_XEN=m +CONFIG_NET_9P_RDMA=m +# CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +CONFIG_CEPH_LIB_PRETTYDEBUG=y +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +CONFIG_NFC=m +CONFIG_NFC_DIGITAL=m +CONFIG_NFC_NCI=m +CONFIG_NFC_NCI_SPI=m +CONFIG_NFC_NCI_UART=m +CONFIG_NFC_HCI=m +CONFIG_NFC_SHDLC=y + +# +# Near Field Communication (NFC) devices +# +CONFIG_NFC_TRF7970A=m +CONFIG_NFC_MEI_PHY=m +CONFIG_NFC_SIM=m +CONFIG_NFC_PORT100=m +CONFIG_NFC_VIRTUAL_NCI=m +CONFIG_NFC_FDP=m +CONFIG_NFC_FDP_I2C=m +CONFIG_NFC_PN544=m +CONFIG_NFC_PN544_I2C=m +CONFIG_NFC_PN544_MEI=m +CONFIG_NFC_PN533=m +CONFIG_NFC_PN533_USB=m +CONFIG_NFC_PN533_I2C=m +CONFIG_NFC_PN532_UART=m +CONFIG_NFC_MICROREAD=m +CONFIG_NFC_MICROREAD_I2C=m +CONFIG_NFC_MICROREAD_MEI=m 
+CONFIG_NFC_MRVL=m +CONFIG_NFC_MRVL_USB=m +CONFIG_NFC_MRVL_UART=m +CONFIG_NFC_MRVL_I2C=m +CONFIG_NFC_MRVL_SPI=m +CONFIG_NFC_ST21NFCA=m +CONFIG_NFC_ST21NFCA_I2C=m +CONFIG_NFC_ST_NCI=m +CONFIG_NFC_ST_NCI_I2C=m +CONFIG_NFC_ST_NCI_SPI=m +CONFIG_NFC_NXP_NCI=m +CONFIG_NFC_NXP_NCI_I2C=m +CONFIG_NFC_S3FWRN5=m +CONFIG_NFC_S3FWRN5_I2C=m +CONFIG_NFC_S3FWRN82_UART=m +CONFIG_NFC_ST95HF=m +# end of Near Field Communication (NFC) devices + +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_SELFTESTS=m +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +CONFIG_FAILOVER=m +CONFIG_ETHTOOL_NETLINK=y +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# +CONFIG_HAVE_EISA=y +# CONFIG_EISA is not set +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +CONFIG_PCIE_PTM=y +CONFIG_PCIE_EDR=y +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +CONFIG_PCI_PF_STUB=m +CONFIG_XEN_PCIDEV_FRONTEND=m +CONFIG_PCI_ATS=y +CONFIG_PCI_LOCKLESS_CONFIG=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_PCI_P2PDMA=y +CONFIG_PCI_LABEL=y +CONFIG_PCI_HYPERV=m +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +CONFIG_HOTPLUG_PCI_CPCI=y +CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m +CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m +CONFIG_HOTPLUG_PCI_SHPC=y + +# +# PCI controller drivers +# +CONFIG_VMD=m +CONFIG_PCI_HYPERV_INTERFACE=m + +# +# DesignWare PCI Core Support +# +CONFIG_PCIE_DW=y +CONFIG_PCIE_DW_HOST=y +CONFIG_PCIE_DW_PLAT=y +CONFIG_PCIE_DW_PLAT_HOST=y +CONFIG_PCI_MESON=y +# end of DesignWare PCI Core Support + +# +# Mobiveil PCIe Core Support +# +# end of Mobiveil PCIe Core Support + +# +# Cadence PCIe controllers support +# +# end of Cadence PCIe controllers support +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +CONFIG_PCI_SW_SWITCHTEC=m +# end of PCI switch controller drivers + +CONFIG_CXL_BUS=m +CONFIG_CXL_MEM=m +# CONFIG_CXL_MEM_RAW_COMMANDS is not set +CONFIG_PCCARD=m +CONFIG_PCMCIA=m +CONFIG_PCMCIA_LOAD_CIS=y +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y +CONFIG_PD6729=m +CONFIG_I82092=m +CONFIG_PCCARD_NONSTATIC=y +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +CONFIG_AUXILIARY_BUS=y +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +CONFIG_FW_LOADER_COMPRESS=y +CONFIG_FW_CACHE=y +# end of Firmware loader + +CONFIG_WANT_DEV_COREDUMP=y +CONFIG_ALLOW_DEV_COREDUMP=y +CONFIG_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +CONFIG_HMEM_REPORTING=y +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set 
+CONFIG_SYS_HYPERVISOR=y +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SLIMBUS=m +CONFIG_REGMAP_SPI=y +CONFIG_REGMAP_W1=m +CONFIG_REGMAP_MMIO=y +CONFIG_REGMAP_IRQ=y +CONFIG_REGMAP_SOUNDWIRE=m +CONFIG_REGMAP_SOUNDWIRE_MBQ=m +CONFIG_REGMAP_SCCB=m +CONFIG_REGMAP_SPI_AVMM=m +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +# end of Generic Driver Options + +# +# Bus devices +# +CONFIG_MHI_BUS=m +# CONFIG_MHI_BUS_DEBUG is not set +CONFIG_MHI_BUS_PCI_GENERIC=m +# end of Bus devices + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +CONFIG_GNSS=m +CONFIG_GNSS_SERIAL=m +CONFIG_GNSS_MTK_SERIAL=m +CONFIG_GNSS_SIRF_SERIAL=m +CONFIG_GNSS_UBX_SERIAL=m +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +CONFIG_MTD_PSTORE=m +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +CONFIG_MTD_RAM=m +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PHYSMAP is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +CONFIG_MTD_PLATRAM=m +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +CONFIG_MTD_PHRAM=m +# CONFIG_MTD_MTDRAM is not set +CONFIG_MTD_BLOCK2MTD=m + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# +# NAND +# +CONFIG_MTD_NAND_CORE=m +# CONFIG_MTD_ONENAND is not set +CONFIG_MTD_RAW_NAND=m + +# +# Raw/parallel NAND flash controllers +# +# CONFIG_MTD_NAND_DENALI_PCI is not set +# CONFIG_MTD_NAND_CAFE is not set +# CONFIG_MTD_NAND_MXIC is not set +# CONFIG_MTD_NAND_GPIO is not set +# CONFIG_MTD_NAND_PLATFORM is not set +# CONFIG_MTD_NAND_ARASAN is not set + +# +# Misc +# +CONFIG_MTD_NAND_NANDSIM=m +# CONFIG_MTD_NAND_RICOH is not set +# CONFIG_MTD_NAND_DISKONCHIP is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# ECC engine support +# +CONFIG_MTD_NAND_ECC=y +# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set +# CONFIG_MTD_NAND_ECC_SW_BCH is not set +# end of ECC engine support +# end of NAND + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + +# CONFIG_MTD_SPI_NOR is not set +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set +# CONFIG_MTD_HYPERBUS is not set +# CONFIG_OF is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m 
+CONFIG_PARPORT_PC_FIFO=y +CONFIG_PARPORT_PC_SUPERIO=y +CONFIG_PARPORT_PC_PCMCIA=m +CONFIG_PARPORT_AX88796=m +CONFIG_PARPORT_1284=y +CONFIG_PARPORT_NOT_PC=y +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_BLK_DEV_FD=m +CONFIG_CDROM=m +# CONFIG_PARIDE is not set +CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m +CONFIG_ZRAM=m +CONFIG_ZRAM_DEF_COMP_LZORLE=y +# CONFIG_ZRAM_DEF_COMP_ZSTD is not set +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set +# CONFIG_ZRAM_DEF_COMP_LZO is not set +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set +# CONFIG_ZRAM_DEF_COMP_842 is not set +CONFIG_ZRAM_DEF_COMP="lzo-rle" +CONFIG_ZRAM_WRITEBACK=y +# CONFIG_ZRAM_MEMORY_TRACKING is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_BLK_DEV_DRBD=m +# CONFIG_DRBD_FAULT_INJECTION is not set +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_SX8=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +CONFIG_ATA_OVER_ETH=m +CONFIG_XEN_BLKDEV_FRONTEND=m +CONFIG_XEN_BLKDEV_BACKEND=m +CONFIG_VIRTIO_BLK=m +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_RSXX=m +CONFIG_BLK_DEV_RNBD=y +CONFIG_BLK_DEV_RNBD_CLIENT=m +CONFIG_BLK_DEV_RNBD_SERVER=m + +# +# NVME Support +# +CONFIG_NVME_CORE=y +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_HWMON=y +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_PASSTHRU=y +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m +# end of NVME Support + +# +# Misc devices +# +CONFIG_SENSORS_LIS3LV02D=m +CONFIG_AD525X_DPOT=m +CONFIG_AD525X_DPOT_I2C=m +CONFIG_AD525X_DPOT_SPI=m +# CONFIG_DUMMY_IRQ is not set +CONFIG_IBM_ASM=m +CONFIG_PHANTOM=m +CONFIG_TIFM_CORE=m +CONFIG_TIFM_7XX1=m +CONFIG_ICS932S401=m +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_HP_ILO=m +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +CONFIG_HMC6352=m +CONFIG_DS1682=m +CONFIG_VMWARE_BALLOON=m +CONFIG_LATTICE_ECP3_CONFIG=m +# CONFIG_SRAM is not set +# CONFIG_DW_XDATA_PCIE is not set +CONFIG_PCI_ENDPOINT_TEST=m +CONFIG_XILINX_SDFEC=m +CONFIG_MISC_RTSX=m +CONFIG_C2PORT=m +CONFIG_C2PORT_DURAMAR_2150=m + +# +# EEPROM support +# +CONFIG_EEPROM_AT24=m +# CONFIG_EEPROM_AT25 is not set +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +CONFIG_EEPROM_IDT_89HPESX=m +CONFIG_EEPROM_EE1004=m +# end of EEPROM support + +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +CONFIG_TI_ST=m +# end of Texas Instruments shared transport line discipline + +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_ALTERA_STAPL=m +CONFIG_INTEL_MEI=m +CONFIG_INTEL_MEI_ME=m +CONFIG_INTEL_MEI_TXE=m +CONFIG_INTEL_MEI_HDCP=m +CONFIG_VMWARE_VMCI=m +CONFIG_GENWQE=m +CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY=0 +CONFIG_ECHO=m +CONFIG_BCM_VK=m +CONFIG_BCM_VK_TTY=y +CONFIG_MISC_ALCOR_PCI=m +CONFIG_MISC_RTSX_PCI=m +CONFIG_MISC_RTSX_USB=m +CONFIG_HABANA_AI=m +CONFIG_UACCE=m +# CONFIG_PVPANIC is not set +# end of Misc devices + +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y 
+CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +CONFIG_BLK_DEV_3W_XXXX_RAID=m +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_3W_9XXX=m +CONFIG_SCSI_3W_SAS=m +CONFIG_SCSI_ACARD=m +CONFIG_SCSI_AACRAID=m +CONFIG_SCSI_AIC7XXX=m +CONFIG_AIC7XXX_CMDS_PER_DEVICE=32 +CONFIG_AIC7XXX_RESET_DELAY_MS=15000 +CONFIG_AIC7XXX_DEBUG_ENABLE=y +CONFIG_AIC7XXX_DEBUG_MASK=0 +CONFIG_AIC7XXX_REG_PRETTY_PRINT=y +CONFIG_SCSI_AIC79XX=m +CONFIG_AIC79XX_CMDS_PER_DEVICE=32 +CONFIG_AIC79XX_RESET_DELAY_MS=15000 +CONFIG_AIC79XX_DEBUG_ENABLE=y +CONFIG_AIC79XX_DEBUG_MASK=0 +CONFIG_AIC79XX_REG_PRETTY_PRINT=y +CONFIG_SCSI_AIC94XX=m +CONFIG_AIC94XX_DEBUG=y +CONFIG_SCSI_MVSAS=m +CONFIG_SCSI_MVSAS_DEBUG=y +CONFIG_SCSI_MVSAS_TASKLET=y +CONFIG_SCSI_MVUMI=m +CONFIG_SCSI_DPT_I2O=m +CONFIG_SCSI_ADVANSYS=m +CONFIG_SCSI_ARCMSR=m +CONFIG_SCSI_ESAS2R=m +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=m +CONFIG_MEGARAID_MAILBOX=m +CONFIG_MEGARAID_LEGACY=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_SMARTPQI=m +CONFIG_SCSI_UFSHCD=m +CONFIG_SCSI_UFSHCD_PCI=m +# CONFIG_SCSI_UFS_DWC_TC_PCI is not set +CONFIG_SCSI_UFSHCD_PLATFORM=m +CONFIG_SCSI_UFS_CDNS_PLATFORM=m +# CONFIG_SCSI_UFS_DWC_TC_PLATFORM is not set +CONFIG_SCSI_UFS_BSG=y +CONFIG_SCSI_UFS_CRYPTO=y +CONFIG_SCSI_HPTIOP=m +CONFIG_SCSI_BUSLOGIC=m +CONFIG_SCSI_FLASHPOINT=y +CONFIG_SCSI_MYRB=m +CONFIG_SCSI_MYRS=m +CONFIG_VMWARE_PVSCSI=m +CONFIG_XEN_SCSI_FRONTEND=m +CONFIG_HYPERV_STORAGE=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +CONFIG_FCOE_FNIC=m +CONFIG_SCSI_SNIC=m +# CONFIG_SCSI_SNIC_DEBUG_FS is not set +CONFIG_SCSI_DMX3191D=m +CONFIG_SCSI_FDOMAIN=m +CONFIG_SCSI_FDOMAIN_PCI=m +CONFIG_SCSI_ISCI=m +CONFIG_SCSI_IPS=m +CONFIG_SCSI_INITIO=m +CONFIG_SCSI_INIA100=m +CONFIG_SCSI_PPA=m +CONFIG_SCSI_IMM=m +# CONFIG_SCSI_IZIP_EPP16 is not set +# CONFIG_SCSI_IZIP_SLOW_CTR is not set +CONFIG_SCSI_STEX=m +CONFIG_SCSI_SYM53C8XX_2=m +CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1 +CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 +CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 +CONFIG_SCSI_SYM53C8XX_MMIO=y +CONFIG_SCSI_IPR=m +CONFIG_SCSI_IPR_TRACE=y +CONFIG_SCSI_IPR_DUMP=y +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA_FC=m +CONFIG_TCM_QLA2XXX=m +# CONFIG_TCM_QLA2XXX_DEBUG is not set +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +CONFIG_SCSI_DC395x=m +CONFIG_SCSI_AM53C974=m +CONFIG_SCSI_WD719X=m +CONFIG_SCSI_DEBUG=m +CONFIG_SCSI_PMCRAID=m +CONFIG_SCSI_PM8001=m +CONFIG_SCSI_BFA_FC=m +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_LOWLEVEL_PCMCIA=y +CONFIG_PCMCIA_AHA152X=m +CONFIG_PCMCIA_FDOMAIN=m +CONFIG_PCMCIA_QLOGIC=m +CONFIG_PCMCIA_SYM53C500=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=m +CONFIG_SCSI_DH_HP_SW=m +CONFIG_SCSI_DH_EMC=m +CONFIG_SCSI_DH_ALUA=m +# end of SCSI device support 
+ +CONFIG_ATA=y +CONFIG_SATA_HOST=y +CONFIG_PATA_TIMINGS=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_FORCE=y +CONFIG_ATA_ACPI=y +CONFIG_SATA_ZPODD=y +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=3 +CONFIG_SATA_AHCI_PLATFORM=m +CONFIG_SATA_INIC162X=m +CONFIG_SATA_ACARD_AHCI=m +CONFIG_SATA_SIL24=m +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +CONFIG_PDC_ADMA=m +CONFIG_SATA_QSTOR=m +CONFIG_SATA_SX4=m +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=m +CONFIG_SATA_DWC=m +# CONFIG_SATA_DWC_OLD_DMA is not set +# CONFIG_SATA_DWC_DEBUG is not set +CONFIG_SATA_MV=m +CONFIG_SATA_NV=m +CONFIG_SATA_PROMISE=m +CONFIG_SATA_SIL=m +CONFIG_SATA_SIS=m +CONFIG_SATA_SVW=m +CONFIG_SATA_ULI=m +CONFIG_SATA_VIA=m +CONFIG_SATA_VITESSE=m + +# +# PATA SFF controllers with BMDMA +# +CONFIG_PATA_ALI=m +CONFIG_PATA_AMD=m +CONFIG_PATA_ARTOP=m +CONFIG_PATA_ATIIXP=m +CONFIG_PATA_ATP867X=m +CONFIG_PATA_CMD64X=m +CONFIG_PATA_CYPRESS=m +CONFIG_PATA_EFAR=m +CONFIG_PATA_HPT366=m +CONFIG_PATA_HPT37X=m +CONFIG_PATA_HPT3X2N=m +CONFIG_PATA_HPT3X3=m +CONFIG_PATA_HPT3X3_DMA=y +CONFIG_PATA_IT8213=m +CONFIG_PATA_IT821X=m +CONFIG_PATA_JMICRON=m +CONFIG_PATA_MARVELL=m +CONFIG_PATA_NETCELL=m +CONFIG_PATA_NINJA32=m +CONFIG_PATA_NS87415=m +CONFIG_PATA_OLDPIIX=m +CONFIG_PATA_OPTIDMA=m +CONFIG_PATA_PDC2027X=m +CONFIG_PATA_PDC_OLD=m +CONFIG_PATA_RADISYS=m +CONFIG_PATA_RDC=m +CONFIG_PATA_SCH=m +CONFIG_PATA_SERVERWORKS=m +CONFIG_PATA_SIL680=m +CONFIG_PATA_SIS=m +CONFIG_PATA_TOSHIBA=m +CONFIG_PATA_TRIFLEX=m +CONFIG_PATA_VIA=m +CONFIG_PATA_WINBOND=m + +# +# PIO-only SFF controllers +# +CONFIG_PATA_CMD640_PCI=m +CONFIG_PATA_MPIIX=m +CONFIG_PATA_NS87410=m +CONFIG_PATA_OPTI=m +CONFIG_PATA_PCMCIA=m +CONFIG_PATA_RZ1000=m + +# +# Generic fallback / legacy drivers +# +CONFIG_PATA_ACPI=m +CONFIG_ATA_GENERIC=m +CONFIG_PATA_LEGACY=m +CONFIG_MD=y +CONFIG_BLK_DEV_MD=m +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_MD_CLUSTER=m +CONFIG_BCACHE=m +# CONFIG_BCACHE_DEBUG is not set +# CONFIG_BCACHE_CLOSURES_DEBUG is not set +CONFIG_BCACHE_ASYNC_REGISTRATION=y +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +# CONFIG_DM_DEBUG_BLOCK_STACK_TRACING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_EBS=m +CONFIG_DM_ERA=m +CONFIG_DM_CLONE=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_MULTIPATH_HST=m +CONFIG_DM_MULTIPATH_IOA=m +CONFIG_DM_DELAY=m +CONFIG_DM_DUST=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG_SECONDARY_KEYRING is not set +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_DM_ZONED=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_TCM_FC=m +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +CONFIG_SBP_TARGET=m +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +CONFIG_FUSION_FC=m +CONFIG_FUSION_SAS=m 
+CONFIG_FUSION_MAX_SGE=128 +CONFIG_FUSION_CTL=m +CONFIG_FUSION_LAN=m +# CONFIG_FUSION_LOGGING is not set + +# +# IEEE 1394 (FireWire) support +# +CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_OHCI=m +CONFIG_FIREWIRE_SBP2=m +CONFIG_FIREWIRE_NET=m +CONFIG_FIREWIRE_NOSY=m +# end of IEEE 1394 (FireWire) support + +CONFIG_MACINTOSH_DRIVERS=y +CONFIG_MAC_EMUMOUSEBTN=m +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +# CONFIG_WIREGUARD_DEBUG is not set +CONFIG_EQUALIZER=m +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +CONFIG_BAREUDP=m +CONFIG_GTP=m +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_NTB_NETDEV=m +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +CONFIG_MHI_NET=m +CONFIG_SUNGEM_PHY=m +# CONFIG_ARCNET is not set +CONFIG_ATM_DRIVERS=y +# CONFIG_ATM_DUMMY is not set +CONFIG_ATM_TCP=m +CONFIG_ATM_LANAI=m +CONFIG_ATM_ENI=m +# CONFIG_ATM_ENI_DEBUG is not set +# CONFIG_ATM_ENI_TUNE_BURST is not set +CONFIG_ATM_FIRESTREAM=m +CONFIG_ATM_ZATM=m +# CONFIG_ATM_ZATM_DEBUG is not set +CONFIG_ATM_NICSTAR=m +# CONFIG_ATM_NICSTAR_USE_SUNI is not set +# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set +CONFIG_ATM_IDT77252=m +# CONFIG_ATM_IDT77252_DEBUG is not set +# CONFIG_ATM_IDT77252_RCV_ALL is not set +CONFIG_ATM_IDT77252_USE_SUNI=y +CONFIG_ATM_AMBASSADOR=m +# CONFIG_ATM_AMBASSADOR_DEBUG is not set +CONFIG_ATM_HORIZON=m +# CONFIG_ATM_HORIZON_DEBUG is not set +CONFIG_ATM_IA=m +# CONFIG_ATM_IA_DEBUG is not set +CONFIG_ATM_FORE200E=m +CONFIG_ATM_FORE200E_USE_TASKLET=y +CONFIG_ATM_FORE200E_TX_RETRY=16 +CONFIG_ATM_FORE200E_DEBUG=0 +CONFIG_ATM_HE=m +CONFIG_ATM_HE_USE_SUNI=y +CONFIG_ATM_SOLOS=m + +# +# Distributed Switch Architecture drivers +# +CONFIG_B53=m +CONFIG_B53_SPI_DRIVER=m +CONFIG_B53_MDIO_DRIVER=m +CONFIG_B53_MMAP_DRIVER=m +CONFIG_B53_SRAB_DRIVER=m +CONFIG_B53_SERDES=m +CONFIG_NET_DSA_BCM_SF2=m +CONFIG_NET_DSA_LOOP=m +CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK=m +# CONFIG_NET_DSA_LANTIQ_GSWIP is not set +CONFIG_NET_DSA_MT7530=m +CONFIG_NET_DSA_MV88E6060=m +CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON=m +CONFIG_NET_DSA_MICROCHIP_KSZ9477=m +CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C=m +CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI=m +CONFIG_NET_DSA_MICROCHIP_KSZ8795=m +CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI=m +# CONFIG_NET_DSA_MICROCHIP_KSZ8863_SMI is not set +CONFIG_NET_DSA_MV88E6XXX=m +CONFIG_NET_DSA_MV88E6XXX_PTP=y +CONFIG_NET_DSA_MSCC_SEVILLE=m +CONFIG_NET_DSA_AR9331=m +CONFIG_NET_DSA_SJA1105=m +CONFIG_NET_DSA_SJA1105_PTP=y +CONFIG_NET_DSA_SJA1105_TAS=y +CONFIG_NET_DSA_SJA1105_VL=y +CONFIG_NET_DSA_XRS700X=m +CONFIG_NET_DSA_XRS700X_I2C=m +CONFIG_NET_DSA_XRS700X_MDIO=m +CONFIG_NET_DSA_QCA8K=m +CONFIG_NET_DSA_REALTEK_SMI=m +CONFIG_NET_DSA_SMSC_LAN9303=m +CONFIG_NET_DSA_SMSC_LAN9303_I2C=m +CONFIG_NET_DSA_SMSC_LAN9303_MDIO=m +CONFIG_NET_DSA_VITESSE_VSC73XX=m +CONFIG_NET_DSA_VITESSE_VSC73XX_SPI=m +CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM=m +# end of Distributed Switch Architecture drivers + +CONFIG_ETHERNET=y +CONFIG_MDIO=m +CONFIG_NET_VENDOR_3COM=y +CONFIG_PCMCIA_3C574=m +CONFIG_PCMCIA_3C589=m +CONFIG_VORTEX=m 
+CONFIG_TYPHOON=m +CONFIG_NET_VENDOR_ADAPTEC=y +CONFIG_ADAPTEC_STARFIRE=m +CONFIG_NET_VENDOR_AGERE=y +CONFIG_ET131X=m +CONFIG_NET_VENDOR_ALACRITECH=y +CONFIG_SLICOSS=m +CONFIG_NET_VENDOR_ALTEON=y +CONFIG_ACENIC=m +# CONFIG_ACENIC_OMIT_TIGON_I is not set +CONFIG_ALTERA_TSE=m +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_ENA_ETHERNET=m +CONFIG_NET_VENDOR_AMD=y +CONFIG_AMD8111_ETH=m +CONFIG_PCNET32=m +CONFIG_PCMCIA_NMCLAN=m +CONFIG_AMD_XGBE=m +CONFIG_AMD_XGBE_DCB=y +CONFIG_AMD_XGBE_HAVE_ECC=y +CONFIG_NET_VENDOR_AQUANTIA=y +CONFIG_AQTION=m +CONFIG_NET_VENDOR_ARC=y +CONFIG_NET_VENDOR_ATHEROS=y +CONFIG_ATL2=m +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +CONFIG_NET_VENDOR_BROADCOM=y +CONFIG_B44=m +CONFIG_B44_PCI_AUTOSELECT=y +CONFIG_B44_PCICORE_AUTOSELECT=y +CONFIG_B44_PCI=y +CONFIG_BCMGENET=m +CONFIG_BNX2=m +CONFIG_CNIC=m +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +CONFIG_SYSTEMPORT=m +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +CONFIG_NET_VENDOR_BROCADE=y +CONFIG_BNA=m +CONFIG_NET_VENDOR_CADENCE=y +CONFIG_MACB=m +CONFIG_MACB_USE_HWSTAMP=y +CONFIG_MACB_PCI=m +CONFIG_NET_VENDOR_CAVIUM=y +CONFIG_THUNDER_NIC_PF=m +CONFIG_THUNDER_NIC_VF=m +CONFIG_THUNDER_NIC_BGX=m +CONFIG_THUNDER_NIC_RGX=m +CONFIG_CAVIUM_PTP=m +CONFIG_LIQUIDIO=m +CONFIG_LIQUIDIO_VF=m +CONFIG_NET_VENDOR_CHELSIO=y +CONFIG_CHELSIO_T1=m +CONFIG_CHELSIO_T1_1G=y +CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4=m +CONFIG_CHELSIO_T4_DCB=y +CONFIG_CHELSIO_T4_FCOE=y +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_CHELSIO_INLINE_CRYPTO=y +CONFIG_CHELSIO_IPSEC_INLINE=m +CONFIG_CHELSIO_TLS_DEVICE=m +CONFIG_NET_VENDOR_CISCO=y +CONFIG_ENIC=m +CONFIG_NET_VENDOR_CORTINA=y +CONFIG_CX_ECAT=m +CONFIG_DNET=m +CONFIG_NET_VENDOR_DEC=y +CONFIG_NET_TULIP=y +CONFIG_DE2104X=m +CONFIG_DE2104X_DSL=0 +CONFIG_TULIP=m +CONFIG_TULIP_MWI=y +CONFIG_TULIP_MMIO=y +CONFIG_TULIP_NAPI=y +CONFIG_TULIP_NAPI_HW_MITIGATION=y +CONFIG_DE4X5=m +CONFIG_WINBOND_840=m +CONFIG_DM9102=m +CONFIG_ULI526X=m +CONFIG_PCMCIA_XIRCOM=m +CONFIG_NET_VENDOR_DLINK=y +CONFIG_DL2K=m +CONFIG_SUNDANCE=m +# CONFIG_SUNDANCE_MMIO is not set +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_BE2NET=m +CONFIG_BE2NET_HWMON=y +CONFIG_BE2NET_BE2=y +CONFIG_BE2NET_BE3=y +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y +CONFIG_NET_VENDOR_EZCHIP=y +CONFIG_NET_VENDOR_FUJITSU=y +CONFIG_PCMCIA_FMVJ18X=m +CONFIG_NET_VENDOR_GOOGLE=y +CONFIG_GVE=m +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m +CONFIG_NET_VENDOR_I825XX=y +CONFIG_NET_VENDOR_INTEL=y +CONFIG_E100=m +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_E1000E_HWTS=y +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGB_DCA=y +CONFIG_IGBVF=m +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCA=y +CONFIG_IXGBE_DCB=y +# CONFIG_IXGBE_IPSEC is not set +CONFIG_IXGBEVF=m +CONFIG_IXGBEVF_IPSEC=y +CONFIG_I40E=m +CONFIG_I40E_DCB=y +CONFIG_IAVF=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_FM10K=m +CONFIG_IGC=m +CONFIG_NET_VENDOR_MICROSOFT=y +# CONFIG_MICROSOFT_MANA is not set +CONFIG_JME=m +CONFIG_NET_VENDOR_MARVELL=y +CONFIG_MVMDIO=m +CONFIG_SKGE=m +# CONFIG_SKGE_DEBUG is not set +CONFIG_SKGE_GENESIS=y +CONFIG_SKY2=m +# CONFIG_SKY2_DEBUG is not set +CONFIG_PRESTERA=m +CONFIG_PRESTERA_PCI=m +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +CONFIG_MLX4_CORE_GEN2=y +CONFIG_MLX5_CORE=m +CONFIG_MLX5_ACCEL=y +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y 
+CONFIG_MLX5_ESWITCH=y +CONFIG_MLX5_CLS_ACT=y +CONFIG_MLX5_TC_CT=y +CONFIG_MLX5_TC_SAMPLE=y +CONFIG_MLX5_CORE_EN_DCB=y +CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLX5_FPGA_IPSEC=y +CONFIG_MLX5_IPSEC=y +CONFIG_MLX5_EN_IPSEC=y +CONFIG_MLX5_FPGA_TLS=y +CONFIG_MLX5_TLS=y +CONFIG_MLX5_EN_TLS=y +CONFIG_MLX5_SW_STEERING=y +CONFIG_MLX5_SF=y +CONFIG_MLX5_SF_MANAGER=y +CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +CONFIG_MLXSW_SWITCHIB=m +CONFIG_MLXSW_SWITCHX2=m +CONFIG_MLXSW_SPECTRUM=m +CONFIG_MLXSW_SPECTRUM_DCB=y +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m +CONFIG_NET_VENDOR_MICREL=y +CONFIG_KS8842=m +CONFIG_KS8851=m +CONFIG_KS8851_MLL=m +CONFIG_KSZ884X_PCI=m +CONFIG_NET_VENDOR_MICROCHIP=y +CONFIG_ENC28J60=m +# CONFIG_ENC28J60_WRITEVERIFY is not set +CONFIG_ENCX24J600=m +CONFIG_LAN743X=m +CONFIG_NET_VENDOR_MICROSEMI=y +CONFIG_MSCC_OCELOT_SWITCH_LIB=m +CONFIG_NET_VENDOR_MYRI=y +CONFIG_MYRI10GE=m +CONFIG_MYRI10GE_DCA=y +CONFIG_FEALNX=m +CONFIG_NET_VENDOR_NATSEMI=y +CONFIG_NATSEMI=m +CONFIG_NS83820=m +CONFIG_NET_VENDOR_NETERION=y +CONFIG_S2IO=m +CONFIG_VXGE=m +# CONFIG_VXGE_DEBUG_TRACE_ALL is not set +CONFIG_NET_VENDOR_NETRONOME=y +CONFIG_NFP=m +CONFIG_NFP_APP_FLOWER=y +CONFIG_NFP_APP_ABM_NIC=y +# CONFIG_NFP_DEBUG is not set +CONFIG_NET_VENDOR_NI=y +CONFIG_NI_XGE_MANAGEMENT_ENET=m +CONFIG_NET_VENDOR_8390=y +CONFIG_PCMCIA_AXNET=m +CONFIG_NE2K_PCI=m +CONFIG_PCMCIA_PCNET=m +CONFIG_NET_VENDOR_NVIDIA=y +CONFIG_FORCEDETH=m +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +CONFIG_NET_VENDOR_PACKET_ENGINES=y +CONFIG_HAMACHI=m +CONFIG_YELLOWFIN=m +CONFIG_NET_VENDOR_PENSANDO=y +CONFIG_IONIC=m +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +CONFIG_QLCNIC=m +CONFIG_QLCNIC_SRIOV=y +CONFIG_QLCNIC_DCB=y +CONFIG_QLCNIC_HWMON=y +CONFIG_NETXEN_NIC=m +CONFIG_QED=m +CONFIG_QED_LL2=y +CONFIG_QED_SRIOV=y +CONFIG_QEDE=m +CONFIG_QED_RDMA=y +CONFIG_QED_ISCSI=y +CONFIG_QED_FCOE=y +CONFIG_QED_OOO=y +CONFIG_NET_VENDOR_QUALCOMM=y +CONFIG_QCOM_EMAC=m +CONFIG_RMNET=m +CONFIG_NET_VENDOR_RDC=y +CONFIG_R6040=m +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_ATP=m +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +CONFIG_8139TOO_TUNE_TWISTER=y +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +CONFIG_NET_VENDOR_RENESAS=y +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_ROCKER=m +CONFIG_NET_VENDOR_SAMSUNG=y +CONFIG_SXGBE_ETH=m +CONFIG_NET_VENDOR_SEEQ=y +CONFIG_NET_VENDOR_SOLARFLARE=y +CONFIG_SFC=m +CONFIG_SFC_MTD=y +CONFIG_SFC_MCDI_MON=y +CONFIG_SFC_SRIOV=y +CONFIG_SFC_MCDI_LOGGING=y +CONFIG_SFC_FALCON=m +CONFIG_SFC_FALCON_MTD=y +CONFIG_NET_VENDOR_SILAN=y +CONFIG_SC92031=m +CONFIG_NET_VENDOR_SIS=y +CONFIG_SIS900=m +CONFIG_SIS190=m +CONFIG_NET_VENDOR_SMSC=y +CONFIG_PCMCIA_SMC91C92=m +CONFIG_EPIC100=m +CONFIG_SMSC911X=m +CONFIG_SMSC9420=m +CONFIG_NET_VENDOR_SOCIONEXT=y +CONFIG_NET_VENDOR_STMICRO=y +CONFIG_STMMAC_ETH=m +# CONFIG_STMMAC_SELFTESTS is not set +CONFIG_STMMAC_PLATFORM=m +CONFIG_DWMAC_GENERIC=m +CONFIG_DWMAC_INTEL=m +CONFIG_STMMAC_PCI=m +CONFIG_NET_VENDOR_SUN=y +CONFIG_HAPPYMEAL=m +CONFIG_SUNGEM=m +CONFIG_CASSINI=m +CONFIG_NIU=m +CONFIG_NET_VENDOR_SYNOPSYS=y +CONFIG_DWC_XLGMAC=m +CONFIG_DWC_XLGMAC_PCI=m +CONFIG_NET_VENDOR_TEHUTI=y +CONFIG_TEHUTI=m +CONFIG_NET_VENDOR_TI=y +# CONFIG_TI_CPSW_PHY_SEL is not set +CONFIG_TLAN=m +CONFIG_NET_VENDOR_VIA=y +CONFIG_VIA_RHINE=m +CONFIG_VIA_RHINE_MMIO=y +CONFIG_VIA_VELOCITY=m +CONFIG_NET_VENDOR_WIZNET=y +CONFIG_WIZNET_W5100=m +CONFIG_WIZNET_W5300=m +# CONFIG_WIZNET_BUS_DIRECT is not set +# CONFIG_WIZNET_BUS_INDIRECT is 
not set +CONFIG_WIZNET_BUS_ANY=y +CONFIG_WIZNET_W5100_SPI=m +CONFIG_NET_VENDOR_XILINX=y +CONFIG_XILINX_EMACLITE=m +CONFIG_XILINX_AXI_EMAC=m +CONFIG_XILINX_LL_TEMAC=m +CONFIG_NET_VENDOR_XIRCOM=y +CONFIG_PCMCIA_XIRC2PS=m +CONFIG_FDDI=m +CONFIG_DEFXX=m +CONFIG_SKFP=m +# CONFIG_HIPPI is not set +CONFIG_NET_SB1000=m +CONFIG_PHYLINK=m +CONFIG_PHYLIB=m +CONFIG_SWPHY=y +CONFIG_LED_TRIGGER_PHY=y +CONFIG_FIXED_PHY=m +CONFIG_SFP=m + +# +# MII PHY device drivers +# +CONFIG_AMD_PHY=m +CONFIG_ADIN_PHY=m +CONFIG_AQUANTIA_PHY=m +CONFIG_AX88796B_PHY=m +CONFIG_BROADCOM_PHY=m +CONFIG_BCM54140_PHY=m +CONFIG_BCM7XXX_PHY=m +CONFIG_BCM84881_PHY=m +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=m +# CONFIG_MARVELL_88X2222_PHY is not set +CONFIG_MICREL_PHY=m +CONFIG_MICROCHIP_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +CONFIG_NATIONAL_PHY=m +# CONFIG_NXP_C45_TJA11XX_PHY is not set +CONFIG_NXP_TJA11XX_PHY=m +CONFIG_AT803X_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_RENESAS_PHY=m +# CONFIG_ROCKCHIP_PHY is not set +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +CONFIG_DP83869_PHY=m +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +CONFIG_MICREL_KS8995MA=m +CONFIG_MDIO_DEVICE=m +CONFIG_MDIO_BUS=m +CONFIG_MDIO_DEVRES=m +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_BCM_UNIMAC=m +CONFIG_MDIO_CAVIUM=m +CONFIG_MDIO_GPIO=m +CONFIG_MDIO_I2C=m +CONFIG_MDIO_MVUSB=m +CONFIG_MDIO_MSCC_MIIM=m +CONFIG_MDIO_THUNDER=m + +# +# MDIO Multiplexers +# + +# +# PCS device drivers +# +CONFIG_PCS_XPCS=m +CONFIG_PCS_LYNX=m +# end of PCS device drivers + +CONFIG_PLIP=m +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +CONFIG_SLIP_MODE_SLIP6=y +CONFIG_USB_NET_DRIVERS=m +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SR9700=m +CONFIG_USB_NET_SR9800=m +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_CDC_PHONET=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +CONFIG_USB_NET_AQC111=m +CONFIG_USB_RTL8153_ECM=m +CONFIG_WLAN=y +CONFIG_WLAN_VENDOR_ADMTEK=y +CONFIG_ADM8211=m +CONFIG_ATH_COMMON=m +CONFIG_WLAN_VENDOR_ATH=y +# CONFIG_ATH_DEBUG is not set +CONFIG_ATH5K=m +CONFIG_ATH5K_DEBUG=y +CONFIG_ATH5K_TRACER=y 
+CONFIG_ATH5K_PCI=y +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_COMMON=m +CONFIG_ATH9K_COMMON_DEBUG=y +CONFIG_ATH9K_BTCOEX_SUPPORT=y +CONFIG_ATH9K=m +CONFIG_ATH9K_PCI=y +CONFIG_ATH9K_AHB=y +CONFIG_ATH9K_DEBUGFS=y +CONFIG_ATH9K_STATION_STATISTICS=y +CONFIG_ATH9K_DYNACK=y +CONFIG_ATH9K_WOW=y +CONFIG_ATH9K_RFKILL=y +CONFIG_ATH9K_CHANNEL_CONTEXT=y +CONFIG_ATH9K_PCOEM=y +CONFIG_ATH9K_PCI_NO_EEPROM=m +CONFIG_ATH9K_HTC=m +CONFIG_ATH9K_HTC_DEBUGFS=y +CONFIG_ATH9K_HWRNG=y +CONFIG_ATH9K_COMMON_SPECTRAL=y +CONFIG_CARL9170=m +CONFIG_CARL9170_LEDS=y +CONFIG_CARL9170_DEBUGFS=y +CONFIG_CARL9170_WPC=y +# CONFIG_CARL9170_HWRNG is not set +CONFIG_ATH6KL=m +CONFIG_ATH6KL_SDIO=m +CONFIG_ATH6KL_USB=m +CONFIG_ATH6KL_DEBUG=y +CONFIG_ATH6KL_TRACING=y +CONFIG_AR5523=m +CONFIG_WIL6210=m +CONFIG_WIL6210_ISR_COR=y +CONFIG_WIL6210_TRACING=y +CONFIG_WIL6210_DEBUGFS=y +CONFIG_ATH10K=m +CONFIG_ATH10K_CE=y +CONFIG_ATH10K_PCI=m +CONFIG_ATH10K_SDIO=m +CONFIG_ATH10K_USB=m +CONFIG_ATH10K_DEBUG=y +CONFIG_ATH10K_DEBUGFS=y +CONFIG_ATH10K_SPECTRAL=y +CONFIG_ATH10K_TRACING=y +CONFIG_WCN36XX=m +CONFIG_WCN36XX_DEBUGFS=y +CONFIG_ATH11K=m +CONFIG_ATH11K_AHB=m +CONFIG_ATH11K_PCI=m +CONFIG_ATH11K_DEBUG=y +CONFIG_ATH11K_DEBUGFS=y +# CONFIG_ATH11K_TRACING is not set +CONFIG_ATH11K_SPECTRAL=y +CONFIG_WLAN_VENDOR_ATMEL=y +CONFIG_ATMEL=m +CONFIG_PCI_ATMEL=m +CONFIG_PCMCIA_ATMEL=m +CONFIG_AT76C50X_USB=m +CONFIG_WLAN_VENDOR_BROADCOM=y +CONFIG_B43=m +CONFIG_B43_BCMA=y +CONFIG_B43_SSB=y +CONFIG_B43_BUSES_BCMA_AND_SSB=y +# CONFIG_B43_BUSES_BCMA is not set +# CONFIG_B43_BUSES_SSB is not set +CONFIG_B43_PCI_AUTOSELECT=y +CONFIG_B43_PCICORE_AUTOSELECT=y +CONFIG_B43_SDIO=y +CONFIG_B43_BCMA_PIO=y +CONFIG_B43_PIO=y +CONFIG_B43_PHY_G=y +CONFIG_B43_PHY_N=y +CONFIG_B43_PHY_LP=y +CONFIG_B43_PHY_HT=y +CONFIG_B43_LEDS=y +CONFIG_B43_HWRNG=y +# CONFIG_B43_DEBUG is not set +CONFIG_B43LEGACY=m +CONFIG_B43LEGACY_PCI_AUTOSELECT=y +CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y +CONFIG_B43LEGACY_LEDS=y +CONFIG_B43LEGACY_HWRNG=y +CONFIG_B43LEGACY_DEBUG=y +CONFIG_B43LEGACY_DMA=y +CONFIG_B43LEGACY_PIO=y +CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y +# CONFIG_B43LEGACY_DMA_MODE is not set +# CONFIG_B43LEGACY_PIO_MODE is not set +CONFIG_BRCMUTIL=m +CONFIG_BRCMSMAC=m +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_PROTO_BCDC=y +CONFIG_BRCMFMAC_PROTO_MSGBUF=y +CONFIG_BRCMFMAC_SDIO=y +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +CONFIG_BRCM_TRACING=y +CONFIG_BRCMDBG=y +CONFIG_WLAN_VENDOR_CISCO=y +CONFIG_AIRO=m +CONFIG_AIRO_CS=m +CONFIG_WLAN_VENDOR_INTEL=y +CONFIG_IPW2100=m +CONFIG_IPW2100_MONITOR=y +# CONFIG_IPW2100_DEBUG is not set +CONFIG_IPW2200=m +CONFIG_IPW2200_MONITOR=y +CONFIG_IPW2200_RADIOTAP=y +CONFIG_IPW2200_PROMISCUOUS=y +CONFIG_IPW2200_QOS=y +# CONFIG_IPW2200_DEBUG is not set +CONFIG_LIBIPW=m +# CONFIG_LIBIPW_DEBUG is not set +CONFIG_IWLEGACY=m +CONFIG_IWL4965=m +CONFIG_IWL3945=m + +# +# iwl3945 / iwl4965 Debugging Options +# +CONFIG_IWLEGACY_DEBUG=y +CONFIG_IWLEGACY_DEBUGFS=y +# end of iwl3945 / iwl4965 Debugging Options + +CONFIG_IWLWIFI=m +CONFIG_IWLWIFI_LEDS=y +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +CONFIG_IWLWIFI_OPMODE_MODULAR=y +# CONFIG_IWLWIFI_BCAST_FILTERING is not set + +# +# Debugging Options +# +CONFIG_IWLWIFI_DEBUG=y +CONFIG_IWLWIFI_DEBUGFS=y +CONFIG_IWLWIFI_DEVICE_TRACING=y +# end of Debugging Options + +CONFIG_WLAN_VENDOR_INTERSIL=y +CONFIG_HOSTAP=m +CONFIG_HOSTAP_FIRMWARE=y +CONFIG_HOSTAP_FIRMWARE_NVRAM=y +CONFIG_HOSTAP_PLX=m +CONFIG_HOSTAP_PCI=m +CONFIG_HOSTAP_CS=m +CONFIG_HERMES=m +CONFIG_HERMES_PRISM=y +CONFIG_HERMES_CACHE_FW_ON_INIT=y +CONFIG_PLX_HERMES=m +CONFIG_TMD_HERMES=m 
+CONFIG_NORTEL_HERMES=m +CONFIG_PCI_HERMES=m +CONFIG_PCMCIA_HERMES=m +CONFIG_PCMCIA_SPECTRUM=m +CONFIG_ORINOCO_USB=m +CONFIG_P54_COMMON=m +CONFIG_P54_USB=m +CONFIG_P54_PCI=m +CONFIG_P54_SPI=m +# CONFIG_P54_SPI_DEFAULT_EEPROM is not set +CONFIG_P54_LEDS=y +CONFIG_PRISM54=m +CONFIG_WLAN_VENDOR_MARVELL=y +CONFIG_LIBERTAS=m +CONFIG_LIBERTAS_USB=m +CONFIG_LIBERTAS_CS=m +CONFIG_LIBERTAS_SDIO=m +CONFIG_LIBERTAS_SPI=m +# CONFIG_LIBERTAS_DEBUG is not set +CONFIG_LIBERTAS_MESH=y +CONFIG_LIBERTAS_THINFIRM=m +# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set +CONFIG_LIBERTAS_THINFIRM_USB=m +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m +CONFIG_MWL8K=m +CONFIG_WLAN_VENDOR_MEDIATEK=y +CONFIG_MT7601U=m +CONFIG_MT76_CORE=m +CONFIG_MT76_LEDS=y +CONFIG_MT76_USB=m +CONFIG_MT76_SDIO=m +CONFIG_MT76x02_LIB=m +CONFIG_MT76x02_USB=m +CONFIG_MT76_CONNAC_LIB=m +CONFIG_MT76x0_COMMON=m +CONFIG_MT76x0U=m +CONFIG_MT76x0E=m +CONFIG_MT76x2_COMMON=m +CONFIG_MT76x2E=m +CONFIG_MT76x2U=m +CONFIG_MT7603E=m +CONFIG_MT7615_COMMON=m +CONFIG_MT7615E=m +CONFIG_MT7663_USB_SDIO_COMMON=m +CONFIG_MT7663U=m +CONFIG_MT7663S=m +CONFIG_MT7915E=m +CONFIG_MT7921E=m +CONFIG_WLAN_VENDOR_MICROCHIP=y +CONFIG_WILC1000=m +CONFIG_WILC1000_SDIO=m +CONFIG_WILC1000_SPI=m +# CONFIG_WILC1000_HW_OOB_INTR is not set +CONFIG_WLAN_VENDOR_RALINK=y +CONFIG_RT2X00=m +CONFIG_RT2400PCI=m +CONFIG_RT2500PCI=m +CONFIG_RT61PCI=m +CONFIG_RT2800PCI=m +CONFIG_RT2800PCI_RT33XX=y +CONFIG_RT2800PCI_RT35XX=y +CONFIG_RT2800PCI_RT53XX=y +CONFIG_RT2800PCI_RT3290=y +CONFIG_RT2500USB=m +CONFIG_RT73USB=m +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT35XX=y +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RT2800_LIB=m +CONFIG_RT2800_LIB_MMIO=m +CONFIG_RT2X00_LIB_MMIO=m +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +CONFIG_RT2X00_LIB_DEBUGFS=y +# CONFIG_RT2X00_DEBUG is not set +CONFIG_WLAN_VENDOR_REALTEK=y +CONFIG_RTL8180=m +CONFIG_RTL8187=m +CONFIG_RTL8187_LEDS=y +CONFIG_RTL_CARDS=m +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m +CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +CONFIG_RTLWIFI=m +CONFIG_RTLWIFI_PCI=m +CONFIG_RTLWIFI_USB=m +CONFIG_RTLWIFI_DEBUG=y +CONFIG_RTL8192C_COMMON=m +CONFIG_RTL8723_COMMON=m +CONFIG_RTLBTCOEXIST=m +CONFIG_RTL8XXXU=m +CONFIG_RTL8XXXU_UNTESTED=y +CONFIG_RTW88=m +CONFIG_RTW88_CORE=m +CONFIG_RTW88_PCI=m +CONFIG_RTW88_8822B=m +CONFIG_RTW88_8822C=m +CONFIG_RTW88_8723D=m +CONFIG_RTW88_8821C=m +CONFIG_RTW88_8822BE=m +CONFIG_RTW88_8822CE=m +CONFIG_RTW88_8723DE=m +CONFIG_RTW88_8821CE=m +CONFIG_RTW88_DEBUG=y +CONFIG_RTW88_DEBUGFS=y +CONFIG_WLAN_VENDOR_RSI=y +CONFIG_RSI_91X=m +CONFIG_RSI_DEBUGFS=y +CONFIG_RSI_SDIO=m +CONFIG_RSI_USB=m +CONFIG_RSI_COEX=y +CONFIG_WLAN_VENDOR_ST=y +CONFIG_CW1200=m +CONFIG_CW1200_WLAN_SDIO=m +CONFIG_CW1200_WLAN_SPI=m +CONFIG_WLAN_VENDOR_TI=y +CONFIG_WL1251=m +CONFIG_WL1251_SPI=m +CONFIG_WL1251_SDIO=m +CONFIG_WL12XX=m +CONFIG_WL18XX=m +CONFIG_WLCORE=m +CONFIG_WLCORE_SDIO=m +CONFIG_WILINK_PLATFORM_DATA=y +CONFIG_WLAN_VENDOR_ZYDAS=y +CONFIG_USB_ZD1201=m +CONFIG_ZD1211RW=m +# CONFIG_ZD1211RW_DEBUG is not set +CONFIG_WLAN_VENDOR_QUANTENNA=y +CONFIG_QTNFMAC=m +CONFIG_QTNFMAC_PCIE=m +CONFIG_PCMCIA_RAYCS=m +CONFIG_PCMCIA_WL3501=m +CONFIG_MAC80211_HWSIM=m +CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_VIRT_WIFI=m +# CONFIG_WAN is not 
set +CONFIG_IEEE802154_DRIVERS=m +CONFIG_IEEE802154_FAKELB=m +CONFIG_IEEE802154_AT86RF230=m +# CONFIG_IEEE802154_AT86RF230_DEBUGFS is not set +CONFIG_IEEE802154_MRF24J40=m +CONFIG_IEEE802154_CC2520=m +CONFIG_IEEE802154_ATUSB=m +CONFIG_IEEE802154_ADF7242=m +CONFIG_IEEE802154_CA8210=m +# CONFIG_IEEE802154_CA8210_DEBUGFS is not set +CONFIG_IEEE802154_MCR20A=m +CONFIG_IEEE802154_HWSIM=m +# CONFIG_WWAN is not set +CONFIG_XEN_NETDEV_FRONTEND=m +CONFIG_XEN_NETDEV_BACKEND=m +CONFIG_VMXNET3=m +CONFIG_FUJITSU_ES=m +CONFIG_USB4_NET=m +CONFIG_HYPERV_NET=m +CONFIG_NETDEVSIM=m +CONFIG_NET_FAILOVER=m +CONFIG_ISDN=y +CONFIG_ISDN_CAPI=y +CONFIG_CAPI_TRACE=y +CONFIG_ISDN_CAPI_MIDDLEWARE=y +CONFIG_MISDN=m +CONFIG_MISDN_DSP=m +CONFIG_MISDN_L1OIP=m + +# +# mISDN hardware drivers +# +CONFIG_MISDN_HFCPCI=m +CONFIG_MISDN_HFCMULTI=m +CONFIG_MISDN_HFCUSB=m +CONFIG_MISDN_AVMFRITZ=m +CONFIG_MISDN_SPEEDFAX=m +CONFIG_MISDN_INFINEON=m +CONFIG_MISDN_W6692=m +CONFIG_MISDN_NETJET=m +CONFIG_MISDN_HDLC=m +CONFIG_MISDN_IPAC=m +CONFIG_MISDN_ISAR=m +CONFIG_NVM=y +CONFIG_NVM_PBLK=m +# CONFIG_NVM_PBLK_DEBUG is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_SPARSEKMAP=m +CONFIG_INPUT_MATRIXKMAP=m + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=m +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +CONFIG_KEYBOARD_ADC=m +CONFIG_KEYBOARD_ADP5520=m +CONFIG_KEYBOARD_ADP5588=m +CONFIG_KEYBOARD_ADP5589=m +CONFIG_KEYBOARD_APPLESPI=m +CONFIG_KEYBOARD_ATKBD=m +CONFIG_KEYBOARD_QT1050=m +CONFIG_KEYBOARD_QT1070=m +CONFIG_KEYBOARD_QT2160=m +CONFIG_KEYBOARD_DLINK_DIR685=m +CONFIG_KEYBOARD_LKKBD=m +CONFIG_KEYBOARD_GPIO=m +CONFIG_KEYBOARD_GPIO_POLLED=m +CONFIG_KEYBOARD_TCA6416=m +CONFIG_KEYBOARD_TCA8418=m +CONFIG_KEYBOARD_MATRIX=m +CONFIG_KEYBOARD_LM8323=m +CONFIG_KEYBOARD_LM8333=m +CONFIG_KEYBOARD_MAX7359=m +CONFIG_KEYBOARD_MCS=m +CONFIG_KEYBOARD_MPR121=m +CONFIG_KEYBOARD_NEWTON=m +CONFIG_KEYBOARD_OPENCORES=m +CONFIG_KEYBOARD_SAMSUNG=m +CONFIG_KEYBOARD_STOWAWAY=m +CONFIG_KEYBOARD_SUNKBD=m +CONFIG_KEYBOARD_IQS62X=m +CONFIG_KEYBOARD_TM2_TOUCHKEY=m +CONFIG_KEYBOARD_TWL4030=m +CONFIG_KEYBOARD_XTKBD=m +CONFIG_KEYBOARD_CROS_EC=m +CONFIG_KEYBOARD_MTK_PMIC=m +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=m +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_LIFEBOOK=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y +CONFIG_MOUSE_PS2_SENTELIC=y +CONFIG_MOUSE_PS2_TOUCHKIT=y +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_VMMOUSE=y +CONFIG_MOUSE_PS2_SMBUS=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y +CONFIG_MOUSE_ELAN_I2C_SMBUS=y +CONFIG_MOUSE_VSXXXAA=m +CONFIG_MOUSE_GPIO=m +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_ANALOG=m +CONFIG_JOYSTICK_A3D=m +CONFIG_JOYSTICK_ADC=m +CONFIG_JOYSTICK_ADI=m +CONFIG_JOYSTICK_COBRA=m +CONFIG_JOYSTICK_GF2K=m +CONFIG_JOYSTICK_GRIP=m +CONFIG_JOYSTICK_GRIP_MP=m +CONFIG_JOYSTICK_GUILLEMOT=m +CONFIG_JOYSTICK_INTERACT=m +CONFIG_JOYSTICK_SIDEWINDER=m +CONFIG_JOYSTICK_TMDC=m +CONFIG_JOYSTICK_IFORCE=m +CONFIG_JOYSTICK_IFORCE_USB=m 
+CONFIG_JOYSTICK_IFORCE_232=m +CONFIG_JOYSTICK_WARRIOR=m +CONFIG_JOYSTICK_MAGELLAN=m +CONFIG_JOYSTICK_SPACEORB=m +CONFIG_JOYSTICK_SPACEBALL=m +CONFIG_JOYSTICK_STINGER=m +CONFIG_JOYSTICK_TWIDJOY=m +CONFIG_JOYSTICK_ZHENHUA=m +CONFIG_JOYSTICK_DB9=m +CONFIG_JOYSTICK_GAMECON=m +CONFIG_JOYSTICK_TURBOGRAFX=m +CONFIG_JOYSTICK_AS5011=m +CONFIG_JOYSTICK_JOYDUMP=m +CONFIG_JOYSTICK_XPAD=m +CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_JOYSTICK_WALKERA0701=m +CONFIG_JOYSTICK_PSXPAD_SPI=m +CONFIG_JOYSTICK_PSXPAD_SPI_FF=y +CONFIG_JOYSTICK_PXRC=m +CONFIG_JOYSTICK_FSIA6B=m +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +CONFIG_TABLET_USB_HANWANG=m +CONFIG_TABLET_USB_KBTAB=m +CONFIG_TABLET_USB_PEGASUS=m +CONFIG_TABLET_SERIAL_WACOM4=m +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_88PM860X=m +CONFIG_TOUCHSCREEN_ADS7846=m +CONFIG_TOUCHSCREEN_AD7877=m +CONFIG_TOUCHSCREEN_AD7879=m +CONFIG_TOUCHSCREEN_AD7879_I2C=m +CONFIG_TOUCHSCREEN_AD7879_SPI=m +CONFIG_TOUCHSCREEN_ADC=m +CONFIG_TOUCHSCREEN_ATMEL_MXT=m +CONFIG_TOUCHSCREEN_ATMEL_MXT_T37=y +CONFIG_TOUCHSCREEN_AUO_PIXCIR=m +CONFIG_TOUCHSCREEN_BU21013=m +CONFIG_TOUCHSCREEN_BU21029=m +CONFIG_TOUCHSCREEN_CHIPONE_ICN8505=m +CONFIG_TOUCHSCREEN_CY8CTMA140=m +CONFIG_TOUCHSCREEN_CY8CTMG110=m +CONFIG_TOUCHSCREEN_CYTTSP_CORE=m +CONFIG_TOUCHSCREEN_CYTTSP_I2C=m +CONFIG_TOUCHSCREEN_CYTTSP_SPI=m +CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m +CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m +CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m +CONFIG_TOUCHSCREEN_DA9034=m +CONFIG_TOUCHSCREEN_DA9052=m +CONFIG_TOUCHSCREEN_DYNAPRO=m +CONFIG_TOUCHSCREEN_HAMPSHIRE=m +CONFIG_TOUCHSCREEN_EETI=m +CONFIG_TOUCHSCREEN_EGALAX_SERIAL=m +CONFIG_TOUCHSCREEN_EXC3000=m +CONFIG_TOUCHSCREEN_FUJITSU=m +CONFIG_TOUCHSCREEN_GOODIX=m +CONFIG_TOUCHSCREEN_HIDEEP=m +# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set +CONFIG_TOUCHSCREEN_ILI210X=m +# CONFIG_TOUCHSCREEN_ILITEK is not set +CONFIG_TOUCHSCREEN_S6SY761=m +CONFIG_TOUCHSCREEN_GUNZE=m +CONFIG_TOUCHSCREEN_EKTF2127=m +CONFIG_TOUCHSCREEN_ELAN=m +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m +CONFIG_TOUCHSCREEN_MAX11801=m +CONFIG_TOUCHSCREEN_MCS5000=m +CONFIG_TOUCHSCREEN_MMS114=m +CONFIG_TOUCHSCREEN_MELFAS_MIP4=m +# CONFIG_TOUCHSCREEN_MSG2638 is not set +CONFIG_TOUCHSCREEN_MTOUCH=m +CONFIG_TOUCHSCREEN_INEXIO=m +CONFIG_TOUCHSCREEN_MK712=m +CONFIG_TOUCHSCREEN_PENMOUNT=m +CONFIG_TOUCHSCREEN_EDT_FT5X06=m +CONFIG_TOUCHSCREEN_TOUCHRIGHT=m +CONFIG_TOUCHSCREEN_TOUCHWIN=m +CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m +CONFIG_TOUCHSCREEN_UCB1400=m +CONFIG_TOUCHSCREEN_PIXCIR=m +CONFIG_TOUCHSCREEN_WDT87XX_I2C=m +CONFIG_TOUCHSCREEN_WM831X=m +CONFIG_TOUCHSCREEN_WM97XX=m +CONFIG_TOUCHSCREEN_WM9705=y +CONFIG_TOUCHSCREEN_WM9712=y +CONFIG_TOUCHSCREEN_WM9713=y +CONFIG_TOUCHSCREEN_USB_COMPOSITE=m +CONFIG_TOUCHSCREEN_MC13783=m +CONFIG_TOUCHSCREEN_USB_EGALAX=y +CONFIG_TOUCHSCREEN_USB_PANJIT=y +CONFIG_TOUCHSCREEN_USB_3M=y +CONFIG_TOUCHSCREEN_USB_ITM=y +CONFIG_TOUCHSCREEN_USB_ETURBO=y +CONFIG_TOUCHSCREEN_USB_GUNZE=y +CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y +CONFIG_TOUCHSCREEN_USB_IRTOUCH=y +CONFIG_TOUCHSCREEN_USB_IDEALTEK=y +CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y +CONFIG_TOUCHSCREEN_USB_GOTOP=y +CONFIG_TOUCHSCREEN_USB_JASTEC=y +CONFIG_TOUCHSCREEN_USB_ELO=y +CONFIG_TOUCHSCREEN_USB_E2I=y +CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y +CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y +CONFIG_TOUCHSCREEN_USB_NEXIO=y +CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y +CONFIG_TOUCHSCREEN_TOUCHIT213=m +CONFIG_TOUCHSCREEN_TSC_SERIO=m +CONFIG_TOUCHSCREEN_TSC200X_CORE=m 
+CONFIG_TOUCHSCREEN_TSC2004=m +CONFIG_TOUCHSCREEN_TSC2005=m +CONFIG_TOUCHSCREEN_TSC2007=m +CONFIG_TOUCHSCREEN_TSC2007_IIO=y +CONFIG_TOUCHSCREEN_PCAP=m +CONFIG_TOUCHSCREEN_RM_TS=m +CONFIG_TOUCHSCREEN_SILEAD=m +CONFIG_TOUCHSCREEN_SIS_I2C=m +CONFIG_TOUCHSCREEN_ST1232=m +CONFIG_TOUCHSCREEN_STMFTS=m +CONFIG_TOUCHSCREEN_SUR40=m +CONFIG_TOUCHSCREEN_SURFACE3_SPI=m +CONFIG_TOUCHSCREEN_SX8654=m +CONFIG_TOUCHSCREEN_TPS6507X=m +CONFIG_TOUCHSCREEN_ZET6223=m +CONFIG_TOUCHSCREEN_ZFORCE=m +CONFIG_TOUCHSCREEN_ROHM_BU21023=m +CONFIG_TOUCHSCREEN_IQS5XX=m +CONFIG_TOUCHSCREEN_ZINITIX=m +CONFIG_INPUT_MISC=y +CONFIG_INPUT_88PM860X_ONKEY=m +CONFIG_INPUT_88PM80X_ONKEY=m +CONFIG_INPUT_AD714X=m +CONFIG_INPUT_AD714X_I2C=m +CONFIG_INPUT_AD714X_SPI=m +CONFIG_INPUT_ARIZONA_HAPTICS=m +CONFIG_INPUT_BMA150=m +CONFIG_INPUT_E3X0_BUTTON=m +CONFIG_INPUT_PCSPKR=m +CONFIG_INPUT_MAX77693_HAPTIC=m +CONFIG_INPUT_MAX8925_ONKEY=m +CONFIG_INPUT_MAX8997_HAPTIC=m +CONFIG_INPUT_MC13783_PWRBUTTON=m +CONFIG_INPUT_MMA8450=m +CONFIG_INPUT_APANEL=m +CONFIG_INPUT_GPIO_BEEPER=m +CONFIG_INPUT_GPIO_DECODER=m +CONFIG_INPUT_GPIO_VIBRA=m +CONFIG_INPUT_ATLAS_BTNS=m +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +CONFIG_INPUT_KXTJ9=m +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_REGULATOR_HAPTIC=m +CONFIG_INPUT_RETU_PWRBUTTON=m +CONFIG_INPUT_AXP20X_PEK=m +CONFIG_INPUT_TWL4030_PWRBUTTON=m +CONFIG_INPUT_TWL4030_VIBRA=m +CONFIG_INPUT_TWL6040_VIBRA=m +CONFIG_INPUT_UINPUT=m +CONFIG_INPUT_PALMAS_PWRBUTTON=m +CONFIG_INPUT_PCF50633_PMU=m +CONFIG_INPUT_PCF8574=m +CONFIG_INPUT_PWM_BEEPER=m +CONFIG_INPUT_PWM_VIBRA=m +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +CONFIG_INPUT_DA7280_HAPTICS=m +CONFIG_INPUT_DA9052_ONKEY=m +CONFIG_INPUT_DA9055_ONKEY=m +CONFIG_INPUT_DA9063_ONKEY=m +CONFIG_INPUT_WM831X_ON=m +CONFIG_INPUT_PCAP=m +CONFIG_INPUT_ADXL34X=m +CONFIG_INPUT_ADXL34X_I2C=m +CONFIG_INPUT_ADXL34X_SPI=m +CONFIG_INPUT_IMS_PCU=m +CONFIG_INPUT_IQS269A=m +# CONFIG_INPUT_IQS626A is not set +CONFIG_INPUT_CMA3000=m +CONFIG_INPUT_CMA3000_I2C=m +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m +CONFIG_INPUT_IDEAPAD_SLIDEBAR=m +CONFIG_INPUT_SOC_BUTTON_ARRAY=m +CONFIG_INPUT_DRV260X_HAPTICS=m +CONFIG_INPUT_DRV2665_HAPTICS=m +CONFIG_INPUT_DRV2667_HAPTICS=m +CONFIG_INPUT_RAVE_SP_PWRBUTTON=m +CONFIG_RMI4_CORE=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +CONFIG_RMI4_F34=y +CONFIG_RMI4_F3A=y +# CONFIG_RMI4_F54 is not set +CONFIG_RMI4_F55=y + +# +# Hardware I/O ports +# +CONFIG_SERIO=m +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_SERIO_I8042=m +CONFIG_SERIO_SERPORT=m +CONFIG_SERIO_CT82C710=m +CONFIG_SERIO_PARKBD=m +CONFIG_SERIO_PCIPS2=m +CONFIG_SERIO_LIBPS2=m +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +CONFIG_SERIO_PS2MULT=m +CONFIG_SERIO_ARC_PS2=m +CONFIG_HYPERV_KEYBOARD=m +CONFIG_SERIO_GPIO_PS2=m +CONFIG_USERIO=m +CONFIG_GAMEPORT=m +CONFIG_GAMEPORT_NS558=m +CONFIG_GAMEPORT_L4=m +CONFIG_GAMEPORT_EMU10K1=m +CONFIG_GAMEPORT_FM801=m +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_LDISC_AUTOLOAD=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +# 
CONFIG_SERIAL_8250_16550A_VARIANTS is not set +CONFIG_SERIAL_8250_FINTEK=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=m +CONFIG_SERIAL_8250_CS=m +CONFIG_SERIAL_8250_MEN_MCB=m +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=32 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DWLIB=y +CONFIG_SERIAL_8250_DW=m +CONFIG_SERIAL_8250_RT288X=y +CONFIG_SERIAL_8250_LPSS=y +CONFIG_SERIAL_8250_MID=y + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_MAX3100=m +CONFIG_SERIAL_MAX310X=m +CONFIG_SERIAL_UARTLITE=m +CONFIG_SERIAL_UARTLITE_NR_UARTS=1 +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_JSM=m +# CONFIG_SERIAL_LANTIQ is not set +CONFIG_SERIAL_SCCNXP=m +CONFIG_SERIAL_SC16IS7XX_CORE=m +CONFIG_SERIAL_SC16IS7XX=m +CONFIG_SERIAL_SC16IS7XX_I2C=y +CONFIG_SERIAL_SC16IS7XX_SPI=y +# CONFIG_SERIAL_BCM63XX is not set +CONFIG_SERIAL_ALTERA_JTAGUART=m +CONFIG_SERIAL_ALTERA_UART=m +CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4 +CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200 +CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_ARC_NR_PORTS=1 +CONFIG_SERIAL_RP2=m +CONFIG_SERIAL_RP2_NR_UARTS=32 +CONFIG_SERIAL_FSL_LPUART=m +CONFIG_SERIAL_FSL_LINFLEXUART=m +CONFIG_SERIAL_MEN_Z135=m +CONFIG_SERIAL_SPRD=m +# end of Serial drivers + +CONFIG_SERIAL_MCTRL_GPIO=y +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_MOXA_INTELLIO=m +CONFIG_MOXA_SMARTIO=m +CONFIG_SYNCLINK_GT=m +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +CONFIG_NOZOMI=m +CONFIG_NULL_TTY=m +CONFIG_HVC_DRIVER=y +CONFIG_HVC_IRQ=y +CONFIG_HVC_XEN=y +CONFIG_HVC_XEN_FRONTEND=y +CONFIG_SERIAL_DEV_BUS=y +CONFIG_SERIAL_DEV_CTRL_TTYPORT=y +CONFIG_PRINTER=m +CONFIG_LP_CONSOLE=y +CONFIG_PPDEV=m +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_IPMB_DEVICE_INTERFACE=m +CONFIG_HW_RANDOM=m +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_INTEL=m +CONFIG_HW_RANDOM_AMD=m +CONFIG_HW_RANDOM_BA431=m +CONFIG_HW_RANDOM_VIA=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_HW_RANDOM_XIPHERA=m +CONFIG_APPLICOM=m + +# +# PCMCIA character devices +# +CONFIG_SYNCLINK_CS=m +CONFIG_CARDMAN_4000=m +CONFIG_CARDMAN_4040=m +CONFIG_SCR24X=m +CONFIG_IPWIRELESS=m +# end of PCMCIA character devices + +CONFIG_MWAVE=m +CONFIG_DEVMEM=y +CONFIG_NVRAM=y +CONFIG_RAW_DRIVER=m +CONFIG_MAX_RAW_DEVS=8192 +CONFIG_DEVPORT=y +CONFIG_HPET=y +# CONFIG_HPET_MMAP is not set +CONFIG_HANGCHECK_TIMER=m +CONFIG_TCG_TPM=m +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=m +CONFIG_TCG_TIS=m +CONFIG_TCG_TIS_SPI=m +CONFIG_TCG_TIS_SPI_CR50=y +CONFIG_TCG_TIS_I2C_CR50=m +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_NSC=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +CONFIG_TCG_XEN=m +CONFIG_TCG_CRB=m +CONFIG_TCG_VTPM_PROXY=m +CONFIG_TCG_TIS_ST33ZP24=m +CONFIG_TCG_TIS_ST33ZP24_I2C=m +CONFIG_TCG_TIS_ST33ZP24_SPI=m +CONFIG_TELCLOCK=m +CONFIG_XILLYBUS=m +CONFIG_XILLYBUS_PCIE=m +# end of Character devices + +# CONFIG_RANDOM_TRUST_CPU is not set +# CONFIG_RANDOM_TRUST_BOOTLOADER is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +CONFIG_I2C_MUX_GPIO=m 
+CONFIG_I2C_MUX_LTC4306=m +CONFIG_I2C_MUX_PCA9541=m +CONFIG_I2C_MUX_PCA954x=m +CONFIG_I2C_MUX_REG=m +CONFIG_I2C_MUX_MLXCPLD=m +# end of Multiplexer I2C Chip support + +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_SMBUS=m +CONFIG_I2C_ALGOBIT=m +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +CONFIG_I2C_ALI1535=m +CONFIG_I2C_ALI1563=m +CONFIG_I2C_ALI15X3=m +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD756_S4882=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_AMD_MP2=m +CONFIG_I2C_I801=m +CONFIG_I2C_ISCH=m +CONFIG_I2C_ISMT=m +CONFIG_I2C_PIIX4=m +CONFIG_I2C_CHT_WC=m +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_NFORCE2_S4985=m +CONFIG_I2C_NVIDIA_GPU=m +CONFIG_I2C_SIS5595=m +CONFIG_I2C_SIS630=m +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m + +# +# ACPI drivers +# +CONFIG_I2C_SCMI=m + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +CONFIG_I2C_CBUS_GPIO=m +CONFIG_I2C_DESIGNWARE_CORE=y +CONFIG_I2C_DESIGNWARE_SLAVE=y +CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_I2C_DESIGNWARE_BAYTRAIL=y +CONFIG_I2C_DESIGNWARE_PCI=y +CONFIG_I2C_EMEV2=m +CONFIG_I2C_GPIO=m +# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set +CONFIG_I2C_KEMPLD=m +CONFIG_I2C_OCORES=m +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_XILINX=m + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_DIOLAN_U2C=m +CONFIG_I2C_DLN2=m +# CONFIG_I2C_CP2615 is not set +CONFIG_I2C_PARPORT=m +CONFIG_I2C_ROBOTFUZZ_OSIF=m +CONFIG_I2C_TAOS_EVM=m +CONFIG_I2C_TINY_USB=m +CONFIG_I2C_VIPERBOARD=m + +# +# Other I2C/SMBus bus drivers +# +CONFIG_I2C_MLXCPLD=m +CONFIG_I2C_CROS_EC_TUNNEL=m +# end of I2C Hardware Bus support + +CONFIG_I2C_STUB=m +CONFIG_I2C_SLAVE=y +CONFIG_I2C_SLAVE_EEPROM=m +CONFIG_I2C_SLAVE_TESTUNIT=m +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +CONFIG_SPI_MEM=y + +# +# SPI Master Controller Drivers +# +CONFIG_SPI_ALTERA=m +CONFIG_SPI_ALTERA_CORE=m +# CONFIG_SPI_ALTERA_DFL is not set +CONFIG_SPI_AXI_SPI_ENGINE=m +CONFIG_SPI_BITBANG=m +CONFIG_SPI_BUTTERFLY=m +CONFIG_SPI_CADENCE=m +CONFIG_SPI_DESIGNWARE=m +CONFIG_SPI_DW_DMA=y +CONFIG_SPI_DW_PCI=m +CONFIG_SPI_DW_MMIO=m +CONFIG_SPI_DLN2=m +CONFIG_SPI_NXP_FLEXSPI=m +CONFIG_SPI_GPIO=m +CONFIG_SPI_LM70_LLP=m +# CONFIG_SPI_LANTIQ_SSC is not set +CONFIG_SPI_OC_TINY=m +CONFIG_SPI_PXA2XX=m +CONFIG_SPI_PXA2XX_PCI=m +# CONFIG_SPI_ROCKCHIP is not set +CONFIG_SPI_SC18IS602=m +CONFIG_SPI_SIFIVE=m +CONFIG_SPI_MXIC=m +CONFIG_SPI_XCOMM=m +CONFIG_SPI_XILINX=m +CONFIG_SPI_ZYNQMP_GQSPI=m +CONFIG_SPI_AMD=m + +# +# SPI Multiplexer support +# +CONFIG_SPI_MUX=m + +# +# SPI Protocol Masters +# +CONFIG_SPI_SPIDEV=m +CONFIG_SPI_LOOPBACK_TEST=m +CONFIG_SPI_TLE62X0=m +CONFIG_SPI_SLAVE=y +CONFIG_SPI_SLAVE_TIME=m +CONFIG_SPI_SLAVE_SYSTEM_CONTROL=m +CONFIG_SPI_DYNAMIC=y +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +CONFIG_PPS_CLIENT_KTIMER=m +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_DP83640_PHY=m +CONFIG_PTP_1588_CLOCK_INES=m +CONFIG_PTP_1588_CLOCK_KVM=m +CONFIG_PTP_1588_CLOCK_IDT82P33=m +CONFIG_PTP_1588_CLOCK_IDTCM=m +CONFIG_PTP_1588_CLOCK_VMW=m +CONFIG_PTP_1588_CLOCK_OCP=m +# end of PTP clock support + +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# 
CONFIG_DEBUG_PINCTRL is not set +CONFIG_PINCTRL_AMD=m +CONFIG_PINCTRL_DA9062=m +CONFIG_PINCTRL_MCP23S08_I2C=m +CONFIG_PINCTRL_MCP23S08_SPI=m +CONFIG_PINCTRL_MCP23S08=m +CONFIG_PINCTRL_SX150X=y +CONFIG_PINCTRL_BAYTRAIL=y +CONFIG_PINCTRL_CHERRYVIEW=y +CONFIG_PINCTRL_LYNXPOINT=y +CONFIG_PINCTRL_INTEL=y +CONFIG_PINCTRL_ALDERLAKE=y +CONFIG_PINCTRL_BROXTON=y +CONFIG_PINCTRL_CANNONLAKE=y +CONFIG_PINCTRL_CEDARFORK=y +CONFIG_PINCTRL_DENVERTON=y +CONFIG_PINCTRL_ELKHARTLAKE=y +CONFIG_PINCTRL_EMMITSBURG=y +CONFIG_PINCTRL_GEMINILAKE=y +CONFIG_PINCTRL_ICELAKE=y +CONFIG_PINCTRL_JASPERLAKE=y +CONFIG_PINCTRL_LAKEFIELD=y +CONFIG_PINCTRL_LEWISBURG=y +CONFIG_PINCTRL_SUNRISEPOINT=y +CONFIG_PINCTRL_TIGERLAKE=y + +# +# Renesas pinctrl drivers +# +# end of Renesas pinctrl drivers + +CONFIG_PINCTRL_MADERA=m +CONFIG_PINCTRL_CS47L15=y +CONFIG_PINCTRL_CS47L35=y +CONFIG_PINCTRL_CS47L85=y +CONFIG_PINCTRL_CS47L90=y +CONFIG_PINCTRL_CS47L92=y +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_CDEV=y +CONFIG_GPIO_CDEV_V1=y +CONFIG_GPIO_GENERIC=m +CONFIG_GPIO_MAX730X=m + +# +# Memory mapped GPIO drivers +# +CONFIG_GPIO_AMDPT=m +CONFIG_GPIO_DWAPB=m +CONFIG_GPIO_EXAR=m +CONFIG_GPIO_GENERIC_PLATFORM=m +CONFIG_GPIO_ICH=m +CONFIG_GPIO_MB86S7X=m +CONFIG_GPIO_MENZ127=m +CONFIG_GPIO_SIOX=m +CONFIG_GPIO_VX855=m +CONFIG_GPIO_AMD_FCH=m +# end of Memory mapped GPIO drivers + +# +# Port-mapped I/O GPIO drivers +# +CONFIG_GPIO_F7188X=m +CONFIG_GPIO_IT87=m +CONFIG_GPIO_SCH=m +CONFIG_GPIO_SCH311X=m +CONFIG_GPIO_WINBOND=m +CONFIG_GPIO_WS16C48=m +# end of Port-mapped I/O GPIO drivers + +# +# I2C GPIO expanders +# +CONFIG_GPIO_ADP5588=m +CONFIG_GPIO_MAX7300=m +CONFIG_GPIO_MAX732X=m +CONFIG_GPIO_PCA953X=m +CONFIG_GPIO_PCA953X_IRQ=y +CONFIG_GPIO_PCA9570=m +CONFIG_GPIO_PCF857X=m +CONFIG_GPIO_TPIC2810=m +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +CONFIG_GPIO_ADP5520=m +CONFIG_GPIO_ARIZONA=m +CONFIG_GPIO_BD9571MWV=m +CONFIG_GPIO_CRYSTAL_COVE=m +CONFIG_GPIO_DA9052=m +CONFIG_GPIO_DA9055=m +CONFIG_GPIO_DLN2=m +CONFIG_GPIO_JANZ_TTL=m +CONFIG_GPIO_KEMPLD=m +CONFIG_GPIO_LP3943=m +CONFIG_GPIO_LP873X=m +CONFIG_GPIO_MADERA=m +CONFIG_GPIO_PALMAS=y +CONFIG_GPIO_RC5T583=y +CONFIG_GPIO_TPS65086=m +CONFIG_GPIO_TPS6586X=y +CONFIG_GPIO_TPS65910=y +CONFIG_GPIO_TPS65912=m +CONFIG_GPIO_TPS68470=y +CONFIG_GPIO_TQMX86=m +CONFIG_GPIO_TWL4030=m +CONFIG_GPIO_TWL6040=m +CONFIG_GPIO_UCB1400=m +CONFIG_GPIO_WHISKEY_COVE=m +CONFIG_GPIO_WM831X=m +CONFIG_GPIO_WM8350=m +CONFIG_GPIO_WM8994=m +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +CONFIG_GPIO_AMD8111=m +CONFIG_GPIO_ML_IOH=m +CONFIG_GPIO_PCI_IDIO_16=m +CONFIG_GPIO_PCIE_IDIO_24=m +CONFIG_GPIO_RDC321X=m +# end of PCI GPIO expanders + +# +# SPI GPIO expanders +# +CONFIG_GPIO_MAX3191X=m +CONFIG_GPIO_MAX7301=m +CONFIG_GPIO_MC33880=m +CONFIG_GPIO_PISOSR=m +CONFIG_GPIO_XRA1403=m +# end of SPI GPIO expanders + +# +# USB GPIO expanders +# +CONFIG_GPIO_VIPERBOARD=m +# end of USB GPIO expanders + +# +# Virtual GPIO drivers +# +CONFIG_GPIO_AGGREGATOR=m +CONFIG_GPIO_MOCKUP=m +# end of Virtual GPIO drivers + +CONFIG_W1=m +CONFIG_W1_CON=y + +# +# 1-wire Bus Masters +# +CONFIG_W1_MASTER_MATROX=m +CONFIG_W1_MASTER_DS2490=m +CONFIG_W1_MASTER_DS2482=m +CONFIG_W1_MASTER_DS1WM=m +CONFIG_W1_MASTER_GPIO=m +CONFIG_W1_MASTER_SGI=m +# end of 1-wire Bus Masters + +# +# 1-wire Slaves +# +CONFIG_W1_SLAVE_THERM=m +CONFIG_W1_SLAVE_SMEM=m +CONFIG_W1_SLAVE_DS2405=m +CONFIG_W1_SLAVE_DS2408=m +# CONFIG_W1_SLAVE_DS2408_READBACK is not set 
+CONFIG_W1_SLAVE_DS2413=m +CONFIG_W1_SLAVE_DS2406=m +CONFIG_W1_SLAVE_DS2423=m +CONFIG_W1_SLAVE_DS2805=m +CONFIG_W1_SLAVE_DS2430=m +CONFIG_W1_SLAVE_DS2431=m +CONFIG_W1_SLAVE_DS2433=m +# CONFIG_W1_SLAVE_DS2433_CRC is not set +CONFIG_W1_SLAVE_DS2438=m +CONFIG_W1_SLAVE_DS250X=m +CONFIG_W1_SLAVE_DS2780=m +CONFIG_W1_SLAVE_DS2781=m +CONFIG_W1_SLAVE_DS28E04=m +CONFIG_W1_SLAVE_DS28E17=m +# end of 1-wire Slaves + +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_MT6323=y +CONFIG_POWER_RESET_RESTART=y +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +CONFIG_PDA_POWER=m +CONFIG_GENERIC_ADC_BATTERY=m +CONFIG_MAX8925_POWER=m +CONFIG_WM831X_BACKUP=m +CONFIG_WM831X_POWER=m +CONFIG_WM8350_POWER=m +CONFIG_TEST_POWER=m +CONFIG_BATTERY_88PM860X=m +CONFIG_CHARGER_ADP5061=m +CONFIG_BATTERY_CW2015=m +CONFIG_BATTERY_DS2760=m +CONFIG_BATTERY_DS2780=m +CONFIG_BATTERY_DS2781=m +CONFIG_BATTERY_DS2782=m +CONFIG_BATTERY_SBS=m +CONFIG_CHARGER_SBS=m +CONFIG_MANAGER_SBS=m +CONFIG_BATTERY_BQ27XXX=m +CONFIG_BATTERY_BQ27XXX_I2C=m +CONFIG_BATTERY_BQ27XXX_HDQ=m +# CONFIG_BATTERY_BQ27XXX_DT_UPDATES_NVM is not set +CONFIG_BATTERY_DA9030=m +CONFIG_BATTERY_DA9052=m +CONFIG_CHARGER_DA9150=m +CONFIG_BATTERY_DA9150=m +CONFIG_CHARGER_AXP20X=m +CONFIG_BATTERY_AXP20X=m +CONFIG_AXP20X_POWER=m +CONFIG_AXP288_CHARGER=m +CONFIG_AXP288_FUEL_GAUGE=m +CONFIG_BATTERY_MAX17040=m +CONFIG_BATTERY_MAX17042=m +CONFIG_BATTERY_MAX1721X=m +CONFIG_BATTERY_TWL4030_MADC=m +CONFIG_CHARGER_88PM860X=m +CONFIG_CHARGER_PCF50633=m +CONFIG_BATTERY_RX51=m +CONFIG_CHARGER_ISP1704=m +CONFIG_CHARGER_MAX8903=m +CONFIG_CHARGER_TWL4030=m +CONFIG_CHARGER_LP8727=m +CONFIG_CHARGER_LP8788=m +CONFIG_CHARGER_GPIO=m +CONFIG_CHARGER_MANAGER=y +CONFIG_CHARGER_LT3651=m +CONFIG_CHARGER_LTC4162L=m +CONFIG_CHARGER_MAX14577=m +CONFIG_CHARGER_MAX77693=m +CONFIG_CHARGER_MAX8997=m +CONFIG_CHARGER_MAX8998=m +CONFIG_CHARGER_MP2629=m +CONFIG_CHARGER_BQ2415X=m +CONFIG_CHARGER_BQ24190=m +CONFIG_CHARGER_BQ24257=m +CONFIG_CHARGER_BQ24735=m +CONFIG_CHARGER_BQ2515X=m +CONFIG_CHARGER_BQ25890=m +CONFIG_CHARGER_BQ25980=m +CONFIG_CHARGER_BQ256XX=m +CONFIG_CHARGER_SMB347=m +CONFIG_CHARGER_TPS65090=m +CONFIG_BATTERY_GAUGE_LTC2941=m +# CONFIG_BATTERY_GOLDFISH is not set +CONFIG_BATTERY_RT5033=m +CONFIG_CHARGER_RT9455=m +CONFIG_CHARGER_CROS_USBPD=m +CONFIG_CHARGER_BD99954=m +CONFIG_CHARGER_WILCO=m +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +CONFIG_SENSORS_ABITUGURU=m +CONFIG_SENSORS_ABITUGURU3=m +CONFIG_SENSORS_AD7314=m +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1021=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM1177=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7X10=m +CONFIG_SENSORS_ADT7310=m +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_AHT10=m +CONFIG_SENSORS_AS370=m +CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_AXI_FAN_CONTROL=m +CONFIG_SENSORS_K8TEMP=m +CONFIG_SENSORS_K10TEMP=m +CONFIG_SENSORS_FAM15H_POWER=m +CONFIG_SENSORS_APPLESMC=m +CONFIG_SENSORS_ASB100=m +CONFIG_SENSORS_ASPEED=m +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_CORSAIR_CPRO=m +CONFIG_SENSORS_CORSAIR_PSU=m +CONFIG_SENSORS_DRIVETEMP=m +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_DELL_SMM=m +CONFIG_SENSORS_DA9052_ADC=m +CONFIG_SENSORS_DA9055=m +CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m 
+CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_MC13783_ADC=m +CONFIG_SENSORS_FSCHMD=m +CONFIG_SENSORS_FTSTEUTATES=m +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +CONFIG_SENSORS_G762=m +CONFIG_SENSORS_HIH6130=m +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +CONFIG_SENSORS_IIO_HWMON=m +CONFIG_SENSORS_I5500=m +CONFIG_SENSORS_CORETEMP=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +CONFIG_SENSORS_POWR1220=m +CONFIG_SENSORS_LINEAGE=m +CONFIG_SENSORS_LTC2945=m +CONFIG_SENSORS_LTC2947=m +CONFIG_SENSORS_LTC2947_I2C=m +CONFIG_SENSORS_LTC2947_SPI=m +CONFIG_SENSORS_LTC2990=m +CONFIG_SENSORS_LTC2992=m +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +CONFIG_SENSORS_LTC4222=m +CONFIG_SENSORS_LTC4245=m +CONFIG_SENSORS_LTC4260=m +CONFIG_SENSORS_LTC4261=m +CONFIG_SENSORS_MAX1111=m +CONFIG_SENSORS_MAX127=m +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +CONFIG_SENSORS_MAX31722=m +CONFIG_SENSORS_MAX31730=m +CONFIG_SENSORS_MAX6621=m +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6642=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +CONFIG_SENSORS_MAX31790=m +CONFIG_SENSORS_MCP3021=m +CONFIG_SENSORS_MLXREG_FAN=m +CONFIG_SENSORS_TC654=m +CONFIG_SENSORS_TPS23861=m +CONFIG_SENSORS_MENF21BMC_HWMON=m +CONFIG_SENSORS_MR75203=m +CONFIG_SENSORS_ADCXX=m +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM70=m +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +CONFIG_SENSORS_NCT6683=m +CONFIG_SENSORS_NCT6775=m +CONFIG_SENSORS_NCT7802=m +CONFIG_SENSORS_NCT7904=m +CONFIG_SENSORS_NPCM7XX=m +# CONFIG_SENSORS_NZXT_KRAKEN2 is not set +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +CONFIG_SENSORS_ADM1266=m +CONFIG_SENSORS_ADM1275=m +CONFIG_SENSORS_BEL_PFE=m +# CONFIG_SENSORS_BPA_RS600 is not set +# CONFIG_SENSORS_FSP_3Y is not set +CONFIG_SENSORS_IBM_CFFPS=m +CONFIG_SENSORS_INSPUR_IPSPS=m +CONFIG_SENSORS_IR35221=m +# CONFIG_SENSORS_IR36021 is not set +CONFIG_SENSORS_IR38064=m +CONFIG_SENSORS_IRPS5401=m +CONFIG_SENSORS_ISL68137=m +CONFIG_SENSORS_LM25066=m +CONFIG_SENSORS_LTC2978=m +# CONFIG_SENSORS_LTC2978_REGULATOR is not set +CONFIG_SENSORS_LTC3815=m +# CONFIG_SENSORS_MAX15301 is not set +CONFIG_SENSORS_MAX16064=m +CONFIG_SENSORS_MAX16601=m +CONFIG_SENSORS_MAX20730=m +CONFIG_SENSORS_MAX20751=m +CONFIG_SENSORS_MAX31785=m +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +CONFIG_SENSORS_MP2975=m +CONFIG_SENSORS_PM6764TR=m +CONFIG_SENSORS_PXE1610=m +CONFIG_SENSORS_Q54SJ108A2=m +# CONFIG_SENSORS_STPDDC60 is not set +CONFIG_SENSORS_TPS40422=m +CONFIG_SENSORS_TPS53679=m +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +CONFIG_SENSORS_XDPE122=m +CONFIG_SENSORS_ZL6100=m +CONFIG_SENSORS_SBTSI=m +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +CONFIG_SENSORS_SHT3x=m +CONFIG_SENSORS_SHTC1=m +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +CONFIG_SENSORS_EMC2103=m +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +CONFIG_SENSORS_STTS751=m +CONFIG_SENSORS_SMM665=m +CONFIG_SENSORS_ADC128D818=m 
+CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_ADS7871=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +CONFIG_SENSORS_INA3221=m +CONFIG_SENSORS_TC74=m +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +CONFIG_SENSORS_TMP103=m +CONFIG_SENSORS_TMP108=m +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_TMP513=m +CONFIG_SENSORS_VIA_CPUTEMP=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +CONFIG_SENSORS_W83773G=m +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +CONFIG_SENSORS_WM831X=m +CONFIG_SENSORS_WM8350=m +CONFIG_SENSORS_XGENE=m +CONFIG_SENSORS_INTEL_M10_BMC_HWMON=m + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_SENSORS_ATK0110=m +CONFIG_THERMAL=y +CONFIG_THERMAL_NETLINK=y +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=100 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_BANG_BANG=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y +CONFIG_DEVFREQ_THERMAL=y +# CONFIG_THERMAL_EMULATION is not set + +# +# Intel thermal drivers +# +CONFIG_INTEL_POWERCLAMP=m +CONFIG_X86_THERMAL_VECTOR=y +CONFIG_X86_PKG_TEMP_THERMAL=m +CONFIG_INTEL_SOC_DTS_IOSF_CORE=m +CONFIG_INTEL_SOC_DTS_THERMAL=m + +# +# ACPI INT340X thermal drivers +# +CONFIG_INT340X_THERMAL=m +CONFIG_ACPI_THERMAL_REL=m +CONFIG_INT3406_THERMAL=m +CONFIG_PROC_THERMAL_MMIO_RAPL=m +# end of ACPI INT340X thermal drivers + +CONFIG_INTEL_BXT_PMIC_THERMAL=m +CONFIG_INTEL_PCH_THERMAL=m +# CONFIG_INTEL_TCC_COOLING is not set +# end of Intel thermal drivers + +CONFIG_GENERIC_ADC_THERMAL=m +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_OPEN_TIMEOUT=0 +CONFIG_WATCHDOG_SYSFS=y + +# +# Watchdog Pretimeout Governors +# +CONFIG_WATCHDOG_PRETIMEOUT_GOV=y +CONFIG_WATCHDOG_PRETIMEOUT_GOV_SEL=m +CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP=m +CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC=y +# CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP is not set +CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC=y + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +# CONFIG_SOFT_WATCHDOG_PRETIMEOUT is not set +CONFIG_DA9052_WATCHDOG=m +CONFIG_DA9055_WATCHDOG=m +CONFIG_DA9063_WATCHDOG=m +CONFIG_DA9062_WATCHDOG=m +CONFIG_MENF21BMC_WATCHDOG=m +CONFIG_MENZ069_WATCHDOG=m +CONFIG_WDAT_WDT=m +CONFIG_WM831X_WATCHDOG=m +CONFIG_WM8350_WATCHDOG=m +CONFIG_XILINX_WATCHDOG=m +CONFIG_ZIIRAVE_WATCHDOG=m +CONFIG_RAVE_SP_WATCHDOG=m +CONFIG_MLX_WDT=m +CONFIG_CADENCE_WATCHDOG=m +CONFIG_DW_WATCHDOG=m +CONFIG_TWL4030_WATCHDOG=m +CONFIG_MAX63XX_WATCHDOG=m +CONFIG_RETU_WATCHDOG=m +CONFIG_ACQUIRE_WDT=m +CONFIG_ADVANTECH_WDT=m +CONFIG_ALIM1535_WDT=m +CONFIG_ALIM7101_WDT=m +CONFIG_EBC_C384_WDT=m +CONFIG_F71808E_WDT=m +CONFIG_SP5100_TCO=m +CONFIG_SBC_FITPC2_WATCHDOG=m +CONFIG_EUROTECH_WDT=m +CONFIG_IB700_WDT=m +CONFIG_IBMASR=m +CONFIG_WAFER_WDT=m +CONFIG_I6300ESB_WDT=m +CONFIG_IE6XX_WDT=m +CONFIG_ITCO_WDT=m +CONFIG_ITCO_VENDOR_SUPPORT=y +CONFIG_IT8712F_WDT=m 
+CONFIG_IT87_WDT=m +CONFIG_HP_WATCHDOG=m +CONFIG_HPWDT_NMI_DECODING=y +CONFIG_KEMPLD_WDT=m +CONFIG_SC1200_WDT=m +CONFIG_PC87413_WDT=m +CONFIG_NV_TCO=m +CONFIG_60XX_WDT=m +CONFIG_CPU5_WDT=m +CONFIG_SMSC_SCH311X_WDT=m +CONFIG_SMSC37B787_WDT=m +CONFIG_TQMX86_WDT=m +CONFIG_VIA_WDT=m +CONFIG_W83627HF_WDT=m +CONFIG_W83877F_WDT=m +CONFIG_W83977F_WDT=m +CONFIG_MACHZ_WDT=m +CONFIG_SBC_EPX_C3_WATCHDOG=m +CONFIG_INTEL_MEI_WDT=m +CONFIG_NI903X_WDT=m +CONFIG_NIC7018_WDT=m +CONFIG_MEN_A21_WDT=m +CONFIG_XEN_WDT=m + +# +# PCI-based Watchdog Cards +# +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# +CONFIG_USBPCWATCHDOG=m +CONFIG_SSB_POSSIBLE=y +CONFIG_SSB=m +CONFIG_SSB_SPROM=y +CONFIG_SSB_BLOCKIO=y +CONFIG_SSB_PCIHOST_POSSIBLE=y +CONFIG_SSB_PCIHOST=y +CONFIG_SSB_B43_PCI_BRIDGE=y +CONFIG_SSB_PCMCIAHOST_POSSIBLE=y +CONFIG_SSB_PCMCIAHOST=y +CONFIG_SSB_SDIOHOST_POSSIBLE=y +CONFIG_SSB_SDIOHOST=y +CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y +CONFIG_SSB_DRIVER_PCICORE=y +CONFIG_SSB_DRIVER_GPIO=y +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_BLOCKIO=y +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +CONFIG_MFD_AS3711=y +CONFIG_PMIC_ADP5520=y +CONFIG_MFD_AAT2870_CORE=y +CONFIG_MFD_BCM590XX=m +CONFIG_MFD_BD9571MWV=m +CONFIG_MFD_AXP20X=m +CONFIG_MFD_AXP20X_I2C=m +CONFIG_MFD_CROS_EC_DEV=m +CONFIG_MFD_MADERA=m +CONFIG_MFD_MADERA_I2C=m +CONFIG_MFD_MADERA_SPI=m +CONFIG_MFD_CS47L15=y +CONFIG_MFD_CS47L35=y +CONFIG_MFD_CS47L85=y +CONFIG_MFD_CS47L90=y +CONFIG_MFD_CS47L92=y +CONFIG_PMIC_DA903X=y +CONFIG_PMIC_DA9052=y +CONFIG_MFD_DA9052_SPI=y +CONFIG_MFD_DA9052_I2C=y +CONFIG_MFD_DA9055=y +CONFIG_MFD_DA9062=m +CONFIG_MFD_DA9063=m +CONFIG_MFD_DA9150=m +CONFIG_MFD_DLN2=m +CONFIG_MFD_MC13XXX=m +CONFIG_MFD_MC13XXX_SPI=m +CONFIG_MFD_MC13XXX_I2C=m +CONFIG_MFD_MP2629=m +CONFIG_HTC_PASIC3=m +CONFIG_HTC_I2CPLD=y +CONFIG_MFD_INTEL_QUARK_I2C_GPIO=m +CONFIG_LPC_ICH=m +CONFIG_LPC_SCH=m +CONFIG_INTEL_SOC_PMIC=y +CONFIG_INTEL_SOC_PMIC_BXTWC=m +CONFIG_INTEL_SOC_PMIC_CHTWC=y +CONFIG_INTEL_SOC_PMIC_CHTDC_TI=m +CONFIG_INTEL_SOC_PMIC_MRFLD=m +CONFIG_MFD_INTEL_LPSS=m +CONFIG_MFD_INTEL_LPSS_ACPI=m +CONFIG_MFD_INTEL_LPSS_PCI=m +CONFIG_MFD_INTEL_PMC_BXT=m +CONFIG_MFD_INTEL_PMT=m +CONFIG_MFD_IQS62X=m +CONFIG_MFD_JANZ_CMODIO=m +CONFIG_MFD_KEMPLD=m +CONFIG_MFD_88PM800=m +CONFIG_MFD_88PM805=m +CONFIG_MFD_88PM860X=y +CONFIG_MFD_MAX14577=m +CONFIG_MFD_MAX77693=m +CONFIG_MFD_MAX77843=y +CONFIG_MFD_MAX8907=m +CONFIG_MFD_MAX8925=y +CONFIG_MFD_MAX8997=y +CONFIG_MFD_MAX8998=y +CONFIG_MFD_MT6360=m +CONFIG_MFD_MT6397=m +CONFIG_MFD_MENF21BMC=m +CONFIG_EZX_PCAP=y +CONFIG_MFD_VIPERBOARD=m +CONFIG_MFD_RETU=m +CONFIG_MFD_PCF50633=m +CONFIG_PCF50633_ADC=m +CONFIG_PCF50633_GPIO=m +CONFIG_UCB1400_CORE=m +CONFIG_MFD_RDC321X=m +CONFIG_MFD_RT5033=m +CONFIG_MFD_RC5T583=y +CONFIG_MFD_SEC_CORE=y +CONFIG_MFD_SI476X_CORE=m +CONFIG_MFD_SM501=m +CONFIG_MFD_SM501_GPIO=y +CONFIG_MFD_SKY81452=m +CONFIG_MFD_SYSCON=y +CONFIG_MFD_TI_AM335X_TSCADC=m +CONFIG_MFD_LP3943=m +CONFIG_MFD_LP8788=y +CONFIG_MFD_TI_LMU=m +CONFIG_MFD_PALMAS=y +CONFIG_TPS6105X=m +CONFIG_TPS65010=m +CONFIG_TPS6507X=m +CONFIG_MFD_TPS65086=m +CONFIG_MFD_TPS65090=y +CONFIG_MFD_TPS68470=y +CONFIG_MFD_TI_LP873X=m +CONFIG_MFD_TPS6586X=y +CONFIG_MFD_TPS65910=y +CONFIG_MFD_TPS65912=m +CONFIG_MFD_TPS65912_I2C=m +CONFIG_MFD_TPS65912_SPI=m +CONFIG_MFD_TPS80031=y +CONFIG_TWL4030_CORE=y 
+CONFIG_MFD_TWL4030_AUDIO=y +CONFIG_TWL6040_CORE=y +CONFIG_MFD_WL1273_CORE=m +CONFIG_MFD_LM3533=m +CONFIG_MFD_TQMX86=m +CONFIG_MFD_VX855=m +CONFIG_MFD_ARIZONA=y +CONFIG_MFD_ARIZONA_I2C=m +CONFIG_MFD_ARIZONA_SPI=m +CONFIG_MFD_CS47L24=y +CONFIG_MFD_WM5102=y +CONFIG_MFD_WM5110=y +CONFIG_MFD_WM8997=y +CONFIG_MFD_WM8998=y +CONFIG_MFD_WM8400=y +CONFIG_MFD_WM831X=y +CONFIG_MFD_WM831X_I2C=y +CONFIG_MFD_WM831X_SPI=y +CONFIG_MFD_WM8350=y +CONFIG_MFD_WM8350_I2C=y +CONFIG_MFD_WM8994=m +CONFIG_MFD_WCD934X=m +# CONFIG_MFD_ATC260X_I2C is not set +CONFIG_RAVE_SP_CORE=m +CONFIG_MFD_INTEL_M10_BMC=m +# end of Multifunction device drivers + +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +CONFIG_REGULATOR_FIXED_VOLTAGE=m +CONFIG_REGULATOR_VIRTUAL_CONSUMER=m +CONFIG_REGULATOR_USERSPACE_CONSUMER=m +CONFIG_REGULATOR_88PG86X=m +CONFIG_REGULATOR_88PM800=m +CONFIG_REGULATOR_88PM8607=m +CONFIG_REGULATOR_ACT8865=m +CONFIG_REGULATOR_AD5398=m +CONFIG_REGULATOR_AAT2870=m +CONFIG_REGULATOR_ARIZONA_LDO1=m +CONFIG_REGULATOR_ARIZONA_MICSUPP=m +CONFIG_REGULATOR_AS3711=m +CONFIG_REGULATOR_AXP20X=m +CONFIG_REGULATOR_BCM590XX=m +CONFIG_REGULATOR_BD9571MWV=m +CONFIG_REGULATOR_DA903X=m +CONFIG_REGULATOR_DA9052=m +CONFIG_REGULATOR_DA9055=m +CONFIG_REGULATOR_DA9062=m +CONFIG_REGULATOR_DA9210=m +CONFIG_REGULATOR_DA9211=m +CONFIG_REGULATOR_FAN53555=m +CONFIG_REGULATOR_GPIO=m +CONFIG_REGULATOR_ISL9305=m +CONFIG_REGULATOR_ISL6271A=m +CONFIG_REGULATOR_LM363X=m +CONFIG_REGULATOR_LP3971=m +CONFIG_REGULATOR_LP3972=m +CONFIG_REGULATOR_LP872X=m +CONFIG_REGULATOR_LP8755=m +CONFIG_REGULATOR_LP8788=m +CONFIG_REGULATOR_LTC3589=m +CONFIG_REGULATOR_LTC3676=m +CONFIG_REGULATOR_MAX14577=m +CONFIG_REGULATOR_MAX1586=m +CONFIG_REGULATOR_MAX8649=m +CONFIG_REGULATOR_MAX8660=m +CONFIG_REGULATOR_MAX8907=m +CONFIG_REGULATOR_MAX8925=m +CONFIG_REGULATOR_MAX8952=m +CONFIG_REGULATOR_MAX8997=m +CONFIG_REGULATOR_MAX8998=m +CONFIG_REGULATOR_MAX77693=m +CONFIG_REGULATOR_MAX77826=m +CONFIG_REGULATOR_MC13XXX_CORE=m +CONFIG_REGULATOR_MC13783=m +CONFIG_REGULATOR_MC13892=m +CONFIG_REGULATOR_MP8859=m +CONFIG_REGULATOR_MT6311=m +CONFIG_REGULATOR_MT6323=m +CONFIG_REGULATOR_MT6358=m +CONFIG_REGULATOR_MT6360=m +CONFIG_REGULATOR_MT6397=m +CONFIG_REGULATOR_PALMAS=m +CONFIG_REGULATOR_PCA9450=m +CONFIG_REGULATOR_PCAP=m +CONFIG_REGULATOR_PCF50633=m +CONFIG_REGULATOR_PV88060=m +CONFIG_REGULATOR_PV88080=m +CONFIG_REGULATOR_PV88090=m +CONFIG_REGULATOR_PWM=m +CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY=m +CONFIG_REGULATOR_RC5T583=m +CONFIG_REGULATOR_RT4801=m +CONFIG_REGULATOR_RT5033=m +CONFIG_REGULATOR_RTMV20=m +CONFIG_REGULATOR_S2MPA01=m +CONFIG_REGULATOR_S2MPS11=m +CONFIG_REGULATOR_S5M8767=m +CONFIG_REGULATOR_SKY81452=m +CONFIG_REGULATOR_SLG51000=m +CONFIG_REGULATOR_TPS51632=m +CONFIG_REGULATOR_TPS6105X=m +CONFIG_REGULATOR_TPS62360=m +CONFIG_REGULATOR_TPS65023=m +CONFIG_REGULATOR_TPS6507X=m +CONFIG_REGULATOR_TPS65086=m +CONFIG_REGULATOR_TPS65090=m +CONFIG_REGULATOR_TPS65132=m +CONFIG_REGULATOR_TPS6524X=m +CONFIG_REGULATOR_TPS6586X=m +CONFIG_REGULATOR_TPS65910=m +CONFIG_REGULATOR_TPS65912=m +CONFIG_REGULATOR_TPS80031=m +CONFIG_REGULATOR_TWL4030=m +CONFIG_REGULATOR_WM831X=m +CONFIG_REGULATOR_WM8350=m +CONFIG_REGULATOR_WM8400=m +CONFIG_REGULATOR_WM8994=m +CONFIG_RC_CORE=y +CONFIG_RC_MAP=m +CONFIG_LIRC=y +CONFIG_BPF_LIRC_MODE2=y +CONFIG_RC_DECODERS=y +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC6_DECODER=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_SONY_DECODER=m +CONFIG_IR_SANYO_DECODER=m +CONFIG_IR_SHARP_DECODER=m +CONFIG_IR_MCE_KBD_DECODER=m 
+CONFIG_IR_XMP_DECODER=m +CONFIG_IR_IMON_DECODER=m +CONFIG_IR_RCMM_DECODER=m +CONFIG_RC_DEVICES=y +CONFIG_RC_ATI_REMOTE=m +CONFIG_IR_ENE=m +CONFIG_IR_IMON=m +CONFIG_IR_IMON_RAW=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_ITE_CIR=m +CONFIG_IR_FINTEK=m +CONFIG_IR_NUVOTON=m +CONFIG_IR_REDRAT3=m +CONFIG_IR_STREAMZAP=m +CONFIG_IR_WINBOND_CIR=m +CONFIG_IR_IGORPLUGUSB=m +CONFIG_IR_IGUANA=m +CONFIG_IR_TTUSBIR=m +CONFIG_RC_LOOPBACK=m +CONFIG_IR_SERIAL=m +CONFIG_IR_SERIAL_TRANSMITTER=y +CONFIG_IR_SIR=m +CONFIG_RC_XBOX_DVD=m +CONFIG_IR_TOY=m +CONFIG_CEC_CORE=m +CONFIG_CEC_NOTIFIER=y +CONFIG_CEC_PIN=y +CONFIG_MEDIA_CEC_RC=y +# CONFIG_CEC_PIN_ERROR_INJ is not set +CONFIG_MEDIA_CEC_SUPPORT=y +CONFIG_CEC_CH7322=m +CONFIG_CEC_CROS_EC=m +CONFIG_CEC_GPIO=m +CONFIG_CEC_SECO=m +CONFIG_CEC_SECO_RC=y +CONFIG_USB_PULSE8_CEC=m +CONFIG_USB_RAINSHADOW_CEC=m +CONFIG_MEDIA_SUPPORT=m +CONFIG_MEDIA_SUPPORT_FILTER=y +CONFIG_MEDIA_SUBDRV_AUTOSELECT=y + +# +# Media device types +# +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_ANALOG_TV_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y +CONFIG_MEDIA_RADIO_SUPPORT=y +# CONFIG_MEDIA_SDR_SUPPORT is not set +CONFIG_MEDIA_PLATFORM_SUPPORT=y +CONFIG_MEDIA_TEST_SUPPORT=y +# end of Media device types + +CONFIG_VIDEO_DEV=m +CONFIG_MEDIA_CONTROLLER=y +CONFIG_DVB_CORE=m + +# +# Video4Linux options +# +CONFIG_VIDEO_V4L2=m +CONFIG_VIDEO_V4L2_I2C=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +CONFIG_VIDEO_TUNER=m +CONFIG_V4L2_MEM2MEM_DEV=m +CONFIG_V4L2_FLASH_LED_CLASS=m +CONFIG_V4L2_FWNODE=m +CONFIG_VIDEOBUF_GEN=m +CONFIG_VIDEOBUF_DMA_SG=m +CONFIG_VIDEOBUF_VMALLOC=m +# end of Video4Linux options + +# +# Media controller options +# +CONFIG_MEDIA_CONTROLLER_DVB=y +CONFIG_MEDIA_CONTROLLER_REQUEST_API=y + +# +# Please notice that the enabled Media controller Request API is EXPERIMENTAL +# +# end of Media controller options + +# +# Digital TV options +# +CONFIG_DVB_MMAP=y +CONFIG_DVB_NET=y +CONFIG_DVB_MAX_ADAPTERS=16 +CONFIG_DVB_DYNAMIC_MINORS=y +# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set +# CONFIG_DVB_ULE_DEBUG is not set +# end of Digital TV options + +# +# Media drivers +# + +# +# Drivers filtered as selected at 'Filter media drivers' +# +CONFIG_TTPCI_EEPROM=m +CONFIG_MEDIA_USB_SUPPORT=y + +# +# Webcam devices +# +CONFIG_USB_VIDEO_CLASS=m +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y +CONFIG_USB_GSPCA=m +CONFIG_USB_M5602=m +CONFIG_USB_STV06XX=m +CONFIG_USB_GL860=m +CONFIG_USB_GSPCA_BENQ=m +CONFIG_USB_GSPCA_CONEX=m +CONFIG_USB_GSPCA_CPIA1=m +CONFIG_USB_GSPCA_DTCS033=m +CONFIG_USB_GSPCA_ETOMS=m +CONFIG_USB_GSPCA_FINEPIX=m +CONFIG_USB_GSPCA_JEILINJ=m +CONFIG_USB_GSPCA_JL2005BCD=m +CONFIG_USB_GSPCA_KINECT=m +CONFIG_USB_GSPCA_KONICA=m +CONFIG_USB_GSPCA_MARS=m +CONFIG_USB_GSPCA_MR97310A=m +CONFIG_USB_GSPCA_NW80X=m +CONFIG_USB_GSPCA_OV519=m +CONFIG_USB_GSPCA_OV534=m +CONFIG_USB_GSPCA_OV534_9=m +CONFIG_USB_GSPCA_PAC207=m +CONFIG_USB_GSPCA_PAC7302=m +CONFIG_USB_GSPCA_PAC7311=m +CONFIG_USB_GSPCA_SE401=m +CONFIG_USB_GSPCA_SN9C2028=m +CONFIG_USB_GSPCA_SN9C20X=m +CONFIG_USB_GSPCA_SONIXB=m +CONFIG_USB_GSPCA_SONIXJ=m +CONFIG_USB_GSPCA_SPCA500=m +CONFIG_USB_GSPCA_SPCA501=m +CONFIG_USB_GSPCA_SPCA505=m +CONFIG_USB_GSPCA_SPCA506=m +CONFIG_USB_GSPCA_SPCA508=m +CONFIG_USB_GSPCA_SPCA561=m +CONFIG_USB_GSPCA_SPCA1528=m +CONFIG_USB_GSPCA_SQ905=m +CONFIG_USB_GSPCA_SQ905C=m +CONFIG_USB_GSPCA_SQ930X=m +CONFIG_USB_GSPCA_STK014=m +CONFIG_USB_GSPCA_STK1135=m +CONFIG_USB_GSPCA_STV0680=m +CONFIG_USB_GSPCA_SUNPLUS=m +CONFIG_USB_GSPCA_T613=m +CONFIG_USB_GSPCA_TOPRO=m 
+CONFIG_USB_GSPCA_TOUPTEK=m +CONFIG_USB_GSPCA_TV8532=m +CONFIG_USB_GSPCA_VC032X=m +CONFIG_USB_GSPCA_VICAM=m +CONFIG_USB_GSPCA_XIRLINK_CIT=m +CONFIG_USB_GSPCA_ZC3XX=m +CONFIG_USB_PWC=m +# CONFIG_USB_PWC_DEBUG is not set +CONFIG_USB_PWC_INPUT_EVDEV=y +CONFIG_VIDEO_CPIA2=m +CONFIG_USB_ZR364XX=m +CONFIG_USB_STKWEBCAM=m +CONFIG_USB_S2255=m +CONFIG_VIDEO_USBTV=m + +# +# Analog TV USB devices +# +CONFIG_VIDEO_PVRUSB2=m +CONFIG_VIDEO_PVRUSB2_SYSFS=y +CONFIG_VIDEO_PVRUSB2_DVB=y +# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set +CONFIG_VIDEO_HDPVR=m +CONFIG_VIDEO_STK1160_COMMON=m +CONFIG_VIDEO_STK1160=m +CONFIG_VIDEO_GO7007=m +CONFIG_VIDEO_GO7007_USB=m +CONFIG_VIDEO_GO7007_LOADER=m +CONFIG_VIDEO_GO7007_USB_S2250_BOARD=m + +# +# Analog/digital TV USB devices +# +CONFIG_VIDEO_AU0828=m +CONFIG_VIDEO_AU0828_V4L2=y +CONFIG_VIDEO_AU0828_RC=y +CONFIG_VIDEO_CX231XX=m +CONFIG_VIDEO_CX231XX_RC=y +CONFIG_VIDEO_CX231XX_ALSA=m +CONFIG_VIDEO_CX231XX_DVB=m +CONFIG_VIDEO_TM6000=m +CONFIG_VIDEO_TM6000_ALSA=m +CONFIG_VIDEO_TM6000_DVB=m + +# +# Digital TV USB devices +# +CONFIG_DVB_USB=m +# CONFIG_DVB_USB_DEBUG is not set +CONFIG_DVB_USB_DIB3000MC=m +CONFIG_DVB_USB_A800=m +CONFIG_DVB_USB_DIBUSB_MB=m +CONFIG_DVB_USB_DIBUSB_MB_FAULTY=y +CONFIG_DVB_USB_DIBUSB_MC=m +CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_UMT_010=m +CONFIG_DVB_USB_CXUSB=m +CONFIG_DVB_USB_CXUSB_ANALOG=y +CONFIG_DVB_USB_M920X=m +CONFIG_DVB_USB_DIGITV=m +CONFIG_DVB_USB_VP7045=m +CONFIG_DVB_USB_VP702X=m +CONFIG_DVB_USB_GP8PSK=m +CONFIG_DVB_USB_NOVA_T_USB2=m +CONFIG_DVB_USB_TTUSB2=m +CONFIG_DVB_USB_DTT200U=m +CONFIG_DVB_USB_OPERA1=m +CONFIG_DVB_USB_AF9005=m +CONFIG_DVB_USB_AF9005_REMOTE=m +CONFIG_DVB_USB_PCTV452E=m +CONFIG_DVB_USB_DW2102=m +CONFIG_DVB_USB_CINERGY_T2=m +CONFIG_DVB_USB_DTV5100=m +CONFIG_DVB_USB_AZ6027=m +CONFIG_DVB_USB_TECHNISAT_USB2=m +CONFIG_DVB_USB_V2=m +CONFIG_DVB_USB_AF9015=m +CONFIG_DVB_USB_AF9035=m +CONFIG_DVB_USB_ANYSEE=m +CONFIG_DVB_USB_AU6610=m +CONFIG_DVB_USB_AZ6007=m +CONFIG_DVB_USB_CE6230=m +CONFIG_DVB_USB_EC168=m +CONFIG_DVB_USB_GL861=m +CONFIG_DVB_USB_LME2510=m +CONFIG_DVB_USB_MXL111SF=m +CONFIG_DVB_USB_RTL28XXU=m +CONFIG_DVB_USB_DVBSKY=m +CONFIG_DVB_USB_ZD1301=m +CONFIG_DVB_TTUSB_BUDGET=m +CONFIG_DVB_TTUSB_DEC=m +CONFIG_SMS_USB_DRV=m +CONFIG_DVB_B2C2_FLEXCOP_USB=m +# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set +CONFIG_DVB_AS102=m + +# +# Webcam, TV (analog/digital) USB devices +# +CONFIG_VIDEO_EM28XX=m +CONFIG_VIDEO_EM28XX_V4L2=m +CONFIG_VIDEO_EM28XX_ALSA=m +CONFIG_VIDEO_EM28XX_DVB=m +CONFIG_VIDEO_EM28XX_RC=m +CONFIG_MEDIA_PCI_SUPPORT=y + +# +# Media capture support +# +CONFIG_VIDEO_MEYE=m +CONFIG_VIDEO_SOLO6X10=m +CONFIG_VIDEO_TW5864=m +CONFIG_VIDEO_TW68=m +CONFIG_VIDEO_TW686X=m + +# +# Media capture/analog TV support +# +CONFIG_VIDEO_IVTV=m +# CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS is not set +CONFIG_VIDEO_IVTV_ALSA=m +CONFIG_VIDEO_FB_IVTV=m +# CONFIG_VIDEO_FB_IVTV_FORCE_PAT is not set +CONFIG_VIDEO_HEXIUM_GEMINI=m +CONFIG_VIDEO_HEXIUM_ORION=m +CONFIG_VIDEO_MXB=m +CONFIG_VIDEO_DT3155=m + +# +# Media capture/analog/hybrid TV support +# +CONFIG_VIDEO_CX18=m +CONFIG_VIDEO_CX18_ALSA=m +CONFIG_VIDEO_CX23885=m +CONFIG_MEDIA_ALTERA_CI=m +CONFIG_VIDEO_CX25821=m +CONFIG_VIDEO_CX25821_ALSA=m +CONFIG_VIDEO_CX88=m +CONFIG_VIDEO_CX88_ALSA=m +CONFIG_VIDEO_CX88_BLACKBIRD=m +CONFIG_VIDEO_CX88_DVB=m +CONFIG_VIDEO_CX88_ENABLE_VP3054=y +CONFIG_VIDEO_CX88_VP3054=m +CONFIG_VIDEO_CX88_MPEG=m +CONFIG_VIDEO_BT848=m +CONFIG_DVB_BT8XX=m +CONFIG_VIDEO_SAA7134=m +CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_RC=y +CONFIG_VIDEO_SAA7134_DVB=m 
+CONFIG_VIDEO_SAA7134_GO7007=m +CONFIG_VIDEO_SAA7164=m + +# +# Media digital TV PCI Adapters +# +CONFIG_DVB_AV7110_IR=y +CONFIG_DVB_AV7110=m +CONFIG_DVB_AV7110_OSD=y +CONFIG_DVB_BUDGET_CORE=m +CONFIG_DVB_BUDGET=m +CONFIG_DVB_BUDGET_CI=m +CONFIG_DVB_BUDGET_AV=m +CONFIG_DVB_BUDGET_PATCH=m +CONFIG_DVB_B2C2_FLEXCOP_PCI=m +# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set +CONFIG_DVB_PLUTO2=m +CONFIG_DVB_DM1105=m +CONFIG_DVB_PT1=m +CONFIG_DVB_PT3=m +CONFIG_MANTIS_CORE=m +CONFIG_DVB_MANTIS=m +CONFIG_DVB_HOPPER=m +CONFIG_DVB_NGENE=m +CONFIG_DVB_DDBRIDGE=m +# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set +CONFIG_DVB_SMIPCIE=m +CONFIG_DVB_NETUP_UNIDVB=m +CONFIG_VIDEO_IPU3_CIO2=m +CONFIG_CIO2_BRIDGE=y +CONFIG_RADIO_ADAPTERS=y +CONFIG_RADIO_TEA575X=m +CONFIG_RADIO_SI470X=m +CONFIG_USB_SI470X=m +CONFIG_I2C_SI470X=m +CONFIG_RADIO_SI4713=m +CONFIG_USB_SI4713=m +CONFIG_PLATFORM_SI4713=m +CONFIG_I2C_SI4713=m +CONFIG_RADIO_SI476X=m +CONFIG_USB_MR800=m +CONFIG_USB_DSBR=m +CONFIG_RADIO_MAXIRADIO=m +CONFIG_RADIO_SHARK=m +CONFIG_RADIO_SHARK2=m +CONFIG_USB_KEENE=m +CONFIG_USB_RAREMONO=m +CONFIG_USB_MA901=m +CONFIG_RADIO_TEA5764=m +CONFIG_RADIO_SAA7706H=m +CONFIG_RADIO_TEF6862=m +CONFIG_RADIO_WL1273=m +CONFIG_RADIO_WL128X=m +CONFIG_MEDIA_COMMON_OPTIONS=y + +# +# common driver options +# +CONFIG_VIDEO_CX2341X=m +CONFIG_VIDEO_TVEEPROM=m +CONFIG_CYPRESS_FIRMWARE=m +CONFIG_VIDEOBUF2_CORE=m +CONFIG_VIDEOBUF2_V4L2=m +CONFIG_VIDEOBUF2_MEMOPS=m +CONFIG_VIDEOBUF2_DMA_CONTIG=m +CONFIG_VIDEOBUF2_VMALLOC=m +CONFIG_VIDEOBUF2_DMA_SG=m +CONFIG_VIDEOBUF2_DVB=m +CONFIG_DVB_B2C2_FLEXCOP=m +CONFIG_VIDEO_SAA7146=m +CONFIG_VIDEO_SAA7146_VV=m +CONFIG_SMS_SIANO_MDTV=m +CONFIG_SMS_SIANO_RC=y +# CONFIG_SMS_SIANO_DEBUGFS is not set +CONFIG_VIDEO_V4L2_TPG=m +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_VIDEO_CAFE_CCIC=m +CONFIG_VIDEO_CADENCE=y +CONFIG_VIDEO_CADENCE_CSI2RX=m +CONFIG_VIDEO_CADENCE_CSI2TX=m +CONFIG_VIDEO_ASPEED=m +CONFIG_V4L_MEM2MEM_DRIVERS=y +CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m +CONFIG_DVB_PLATFORM_DRIVERS=y + +# +# MMC/SDIO DVB adapters +# +CONFIG_SMS_SDIO_DRV=m +CONFIG_V4L_TEST_DRIVERS=y +CONFIG_VIDEO_VIMC=m +CONFIG_VIDEO_VIVID=m +CONFIG_VIDEO_VIVID_CEC=y +CONFIG_VIDEO_VIVID_MAX_DEVS=64 +CONFIG_VIDEO_VIM2M=m +CONFIG_VIDEO_VICODEC=m +CONFIG_DVB_TEST_DRIVERS=y +CONFIG_DVB_VIDTV=m + +# +# FireWire (IEEE 1394) Adapters +# +CONFIG_DVB_FIREDTV=m +CONFIG_DVB_FIREDTV_INPUT=y +# end of Media drivers + +CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV=y + +# +# Media ancillary drivers +# +CONFIG_MEDIA_ATTACH=y + +# +# IR I2C driver auto-selected by 'Autoselect ancillary drivers' +# +CONFIG_VIDEO_IR_I2C=m + +# +# audio, video and radio I2C drivers auto-selected by 'Autoselect ancillary drivers' +# +CONFIG_VIDEO_TVAUDIO=m +CONFIG_VIDEO_TDA7432=m +CONFIG_VIDEO_TDA9840=m +CONFIG_VIDEO_TEA6415C=m +CONFIG_VIDEO_TEA6420=m +CONFIG_VIDEO_MSP3400=m +CONFIG_VIDEO_CS3308=m +CONFIG_VIDEO_CS5345=m +CONFIG_VIDEO_CS53L32A=m +CONFIG_VIDEO_UDA1342=m +CONFIG_VIDEO_WM8775=m +CONFIG_VIDEO_WM8739=m +CONFIG_VIDEO_VP27SMPX=m +CONFIG_VIDEO_SONY_BTF_MPX=m +CONFIG_VIDEO_SAA6588=m +CONFIG_VIDEO_SAA711X=m +CONFIG_VIDEO_TVP5150=m +CONFIG_VIDEO_TW2804=m +CONFIG_VIDEO_TW9903=m +CONFIG_VIDEO_TW9906=m + +# +# Video and audio decoders +# +CONFIG_VIDEO_SAA717X=m +CONFIG_VIDEO_CX25840=m +CONFIG_VIDEO_SAA7127=m +CONFIG_VIDEO_UPD64031A=m +CONFIG_VIDEO_UPD64083=m +CONFIG_VIDEO_SAA6752HS=m +CONFIG_VIDEO_M52790=m + +# +# Camera sensor devices +# +CONFIG_VIDEO_APTINA_PLL=m +CONFIG_VIDEO_CCS_PLL=m +CONFIG_VIDEO_HI556=m +CONFIG_VIDEO_IMX214=m +CONFIG_VIDEO_IMX219=m +CONFIG_VIDEO_IMX258=m 
+CONFIG_VIDEO_IMX274=m +CONFIG_VIDEO_IMX290=m +CONFIG_VIDEO_IMX319=m +CONFIG_VIDEO_IMX355=m +CONFIG_VIDEO_OV02A10=m +CONFIG_VIDEO_OV2640=m +CONFIG_VIDEO_OV2659=m +CONFIG_VIDEO_OV2680=m +CONFIG_VIDEO_OV2685=m +CONFIG_VIDEO_OV2740=m +CONFIG_VIDEO_OV5647=m +CONFIG_VIDEO_OV5648=m +CONFIG_VIDEO_OV6650=m +CONFIG_VIDEO_OV5670=m +CONFIG_VIDEO_OV5675=m +CONFIG_VIDEO_OV5695=m +CONFIG_VIDEO_OV7251=m +CONFIG_VIDEO_OV772X=m +CONFIG_VIDEO_OV7640=m +CONFIG_VIDEO_OV7670=m +CONFIG_VIDEO_OV7740=m +CONFIG_VIDEO_OV8856=m +CONFIG_VIDEO_OV8865=m +CONFIG_VIDEO_OV9640=m +CONFIG_VIDEO_OV9650=m +CONFIG_VIDEO_OV9734=m +CONFIG_VIDEO_OV13858=m +CONFIG_VIDEO_VS6624=m +CONFIG_VIDEO_MT9M001=m +CONFIG_VIDEO_MT9M032=m +CONFIG_VIDEO_MT9M111=m +CONFIG_VIDEO_MT9P031=m +CONFIG_VIDEO_MT9T001=m +CONFIG_VIDEO_MT9T112=m +CONFIG_VIDEO_MT9V011=m +CONFIG_VIDEO_MT9V032=m +CONFIG_VIDEO_MT9V111=m +CONFIG_VIDEO_SR030PC30=m +CONFIG_VIDEO_NOON010PC30=m +CONFIG_VIDEO_M5MOLS=m +CONFIG_VIDEO_MAX9271_LIB=m +CONFIG_VIDEO_RDACM20=m +CONFIG_VIDEO_RDACM21=m +CONFIG_VIDEO_RJ54N1=m +CONFIG_VIDEO_S5K6AA=m +CONFIG_VIDEO_S5K6A3=m +CONFIG_VIDEO_S5K4ECGX=m +CONFIG_VIDEO_S5K5BAF=m +CONFIG_VIDEO_CCS=m +CONFIG_VIDEO_ET8EK8=m +CONFIG_VIDEO_S5C73M3=m +# end of Camera sensor devices + +# +# Lens drivers +# +CONFIG_VIDEO_AD5820=m +CONFIG_VIDEO_AK7375=m +CONFIG_VIDEO_DW9714=m +CONFIG_VIDEO_DW9768=m +CONFIG_VIDEO_DW9807_VCM=m +# end of Lens drivers + +# +# Flash devices +# +CONFIG_VIDEO_ADP1653=m +CONFIG_VIDEO_LM3560=m +CONFIG_VIDEO_LM3646=m +# end of Flash devices + +# +# SPI I2C drivers auto-selected by 'Autoselect ancillary drivers' +# + +# +# Media SPI Adapters +# +CONFIG_CXD2880_SPI_DRV=m +# end of Media SPI Adapters + +CONFIG_MEDIA_TUNER=m + +# +# Tuner drivers auto-selected by 'Autoselect ancillary drivers' +# +CONFIG_MEDIA_TUNER_SIMPLE=m +CONFIG_MEDIA_TUNER_TDA18250=m +CONFIG_MEDIA_TUNER_TDA8290=m +CONFIG_MEDIA_TUNER_TDA827X=m +CONFIG_MEDIA_TUNER_TDA18271=m +CONFIG_MEDIA_TUNER_TDA9887=m +CONFIG_MEDIA_TUNER_TEA5761=m +CONFIG_MEDIA_TUNER_TEA5767=m +CONFIG_MEDIA_TUNER_MT20XX=m +CONFIG_MEDIA_TUNER_MT2060=m +CONFIG_MEDIA_TUNER_MT2063=m +CONFIG_MEDIA_TUNER_MT2266=m +CONFIG_MEDIA_TUNER_MT2131=m +CONFIG_MEDIA_TUNER_QT1010=m +CONFIG_MEDIA_TUNER_XC2028=m +CONFIG_MEDIA_TUNER_XC5000=m +CONFIG_MEDIA_TUNER_XC4000=m +CONFIG_MEDIA_TUNER_MXL5005S=m +CONFIG_MEDIA_TUNER_MXL5007T=m +CONFIG_MEDIA_TUNER_MC44S803=m +CONFIG_MEDIA_TUNER_MAX2165=m +CONFIG_MEDIA_TUNER_TDA18218=m +CONFIG_MEDIA_TUNER_FC0011=m +CONFIG_MEDIA_TUNER_FC0012=m +CONFIG_MEDIA_TUNER_FC0013=m +CONFIG_MEDIA_TUNER_TDA18212=m +CONFIG_MEDIA_TUNER_E4000=m +CONFIG_MEDIA_TUNER_FC2580=m +CONFIG_MEDIA_TUNER_M88RS6000T=m +CONFIG_MEDIA_TUNER_TUA9001=m +CONFIG_MEDIA_TUNER_SI2157=m +CONFIG_MEDIA_TUNER_IT913X=m +CONFIG_MEDIA_TUNER_R820T=m +CONFIG_MEDIA_TUNER_MXL301RF=m +CONFIG_MEDIA_TUNER_QM1D1C0042=m +CONFIG_MEDIA_TUNER_QM1D1B0004=m + +# +# DVB Frontend drivers auto-selected by 'Autoselect ancillary drivers' +# + +# +# Multistandard (satellite) frontends +# +CONFIG_DVB_STB0899=m +CONFIG_DVB_STB6100=m +CONFIG_DVB_STV090x=m +CONFIG_DVB_STV0910=m +CONFIG_DVB_STV6110x=m +CONFIG_DVB_STV6111=m +CONFIG_DVB_MXL5XX=m +CONFIG_DVB_M88DS3103=m + +# +# Multistandard (cable + terrestrial) frontends +# +CONFIG_DVB_DRXK=m +CONFIG_DVB_TDA18271C2DD=m +CONFIG_DVB_SI2165=m +CONFIG_DVB_MN88472=m +CONFIG_DVB_MN88473=m + +# +# DVB-S (satellite) frontends +# +CONFIG_DVB_CX24110=m +CONFIG_DVB_CX24123=m +CONFIG_DVB_MT312=m +CONFIG_DVB_ZL10036=m +CONFIG_DVB_ZL10039=m +CONFIG_DVB_S5H1420=m +CONFIG_DVB_STV0288=m +CONFIG_DVB_STB6000=m 
+CONFIG_DVB_STV0299=m +CONFIG_DVB_STV6110=m +CONFIG_DVB_STV0900=m +CONFIG_DVB_TDA8083=m +CONFIG_DVB_TDA10086=m +CONFIG_DVB_TDA8261=m +CONFIG_DVB_VES1X93=m +CONFIG_DVB_TUNER_ITD1000=m +CONFIG_DVB_TUNER_CX24113=m +CONFIG_DVB_TDA826X=m +CONFIG_DVB_TUA6100=m +CONFIG_DVB_CX24116=m +CONFIG_DVB_CX24117=m +CONFIG_DVB_CX24120=m +CONFIG_DVB_SI21XX=m +CONFIG_DVB_TS2020=m +CONFIG_DVB_DS3000=m +CONFIG_DVB_MB86A16=m +CONFIG_DVB_TDA10071=m + +# +# DVB-T (terrestrial) frontends +# +CONFIG_DVB_SP8870=m +CONFIG_DVB_SP887X=m +CONFIG_DVB_CX22700=m +CONFIG_DVB_CX22702=m +CONFIG_DVB_DRXD=m +CONFIG_DVB_L64781=m +CONFIG_DVB_TDA1004X=m +CONFIG_DVB_NXT6000=m +CONFIG_DVB_MT352=m +CONFIG_DVB_ZL10353=m +CONFIG_DVB_DIB3000MB=m +CONFIG_DVB_DIB3000MC=m +CONFIG_DVB_DIB7000M=m +CONFIG_DVB_DIB7000P=m +CONFIG_DVB_TDA10048=m +CONFIG_DVB_AF9013=m +CONFIG_DVB_EC100=m +CONFIG_DVB_STV0367=m +CONFIG_DVB_CXD2820R=m +CONFIG_DVB_CXD2841ER=m +CONFIG_DVB_RTL2830=m +CONFIG_DVB_RTL2832=m +CONFIG_DVB_SI2168=m +CONFIG_DVB_AS102_FE=m +CONFIG_DVB_ZD1301_DEMOD=m +CONFIG_DVB_GP8PSK_FE=m + +# +# DVB-C (cable) frontends +# +CONFIG_DVB_VES1820=m +CONFIG_DVB_TDA10021=m +CONFIG_DVB_TDA10023=m +CONFIG_DVB_STV0297=m + +# +# ATSC (North American/Korean Terrestrial/Cable DTV) frontends +# +CONFIG_DVB_NXT200X=m +CONFIG_DVB_OR51211=m +CONFIG_DVB_OR51132=m +CONFIG_DVB_BCM3510=m +CONFIG_DVB_LGDT330X=m +CONFIG_DVB_LGDT3305=m +CONFIG_DVB_LGDT3306A=m +CONFIG_DVB_LG2160=m +CONFIG_DVB_S5H1409=m +CONFIG_DVB_AU8522=m +CONFIG_DVB_AU8522_DTV=m +CONFIG_DVB_AU8522_V4L=m +CONFIG_DVB_S5H1411=m +CONFIG_DVB_MXL692=m + +# +# ISDB-T (terrestrial) frontends +# +CONFIG_DVB_S921=m +CONFIG_DVB_DIB8000=m +CONFIG_DVB_MB86A20S=m + +# +# ISDB-S (satellite) & ISDB-T (terrestrial) frontends +# +CONFIG_DVB_TC90522=m + +# +# Digital terrestrial only tuners/PLL +# +CONFIG_DVB_PLL=m +CONFIG_DVB_TUNER_DIB0070=m +CONFIG_DVB_TUNER_DIB0090=m + +# +# SEC control devices for DVB-S +# +CONFIG_DVB_DRX39XYJ=m +CONFIG_DVB_LNBH25=m +CONFIG_DVB_LNBP21=m +CONFIG_DVB_LNBP22=m +CONFIG_DVB_ISL6405=m +CONFIG_DVB_ISL6421=m +CONFIG_DVB_ISL6423=m +CONFIG_DVB_A8293=m +CONFIG_DVB_LGS8GXX=m +CONFIG_DVB_ATBM8830=m +CONFIG_DVB_TDA665x=m +CONFIG_DVB_IX2505V=m +CONFIG_DVB_M88RS2000=m +CONFIG_DVB_AF9033=m +CONFIG_DVB_HORUS3A=m +CONFIG_DVB_ASCOT2E=m +CONFIG_DVB_HELENE=m + +# +# Common Interface (EN50221) controller drivers +# +CONFIG_DVB_CXD2099=m +CONFIG_DVB_SP2=m + +# +# Tools to develop new frontends +# +CONFIG_DVB_DUMMY_FE=m +# end of Media ancillary drivers + +# +# Graphics support +# +CONFIG_AGP=m +CONFIG_AGP_AMD64=m +CONFIG_AGP_INTEL=m +CONFIG_AGP_SIS=m +CONFIG_AGP_VIA=m +CONFIG_INTEL_GTT=m +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=10 +CONFIG_VGA_SWITCHEROO=y +CONFIG_DRM=m +CONFIG_DRM_MIPI_DBI=m +CONFIG_DRM_MIPI_DSI=y +CONFIG_DRM_DP_AUX_CHARDEV=y +# CONFIG_DRM_DEBUG_SELFTEST is not set +CONFIG_DRM_KMS_HELPER=m +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DP_CEC=y +CONFIG_DRM_TTM=m +CONFIG_DRM_VRAM_HELPER=m +CONFIG_DRM_TTM_HELPER=m +CONFIG_DRM_GEM_CMA_HELPER=y +CONFIG_DRM_KMS_CMA_HELPER=y +CONFIG_DRM_GEM_SHMEM_HELPER=y +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +CONFIG_DRM_I2C_SIL164=m +CONFIG_DRM_I2C_NXP_TDA998X=m +CONFIG_DRM_I2C_NXP_TDA9950=m +# end of I2C encoder or helper chips + +# +# ARM devices +# +# end of ARM devices + +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +CONFIG_DRM_AMDGPU_SI=y +CONFIG_DRM_AMDGPU_CIK=y +CONFIG_DRM_AMDGPU_USERPTR=y + +# 
+# ACP (Audio CoProcessor) Configuration +# +CONFIG_DRM_AMD_ACP=y +# end of ACP (Audio CoProcessor) Configuration + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +CONFIG_DRM_AMD_DC_DCN=y +CONFIG_DRM_AMD_DC_HDCP=y +CONFIG_DRM_AMD_DC_SI=y +# CONFIG_DRM_AMD_SECURE_DISPLAY is not set +# end of Display Engine Configuration + +CONFIG_HSA_AMD=y +CONFIG_DRM_NOUVEAU=m +# CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT is not set +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +# CONFIG_NOUVEAU_DEBUG_MMU is not set +# CONFIG_NOUVEAU_DEBUG_PUSH is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +CONFIG_DRM_NOUVEAU_SVM=y +CONFIG_DRM_I915=m +CONFIG_DRM_I915_FORCE_PROBE="*" +CONFIG_DRM_I915_CAPTURE_ERROR=y +CONFIG_DRM_I915_COMPRESS_ERROR=y +CONFIG_DRM_I915_USERPTR=y +CONFIG_DRM_I915_GVT=y +CONFIG_DRM_I915_GVT_KVMGT=m +CONFIG_DRM_I915_REQUEST_TIMEOUT=20000 +CONFIG_DRM_I915_FENCE_TIMEOUT=10000 +CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250 +CONFIG_DRM_I915_HEARTBEAT_INTERVAL=2500 +CONFIG_DRM_I915_PREEMPT_TIMEOUT=640 +CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT=8000 +CONFIG_DRM_I915_STOP_TIMEOUT=100 +CONFIG_DRM_I915_TIMESLICE_DURATION=1 +CONFIG_DRM_VGEM=m +CONFIG_DRM_VKMS=m +CONFIG_DRM_VMWGFX=m +CONFIG_DRM_VMWGFX_FBCON=y +CONFIG_DRM_GMA500=m +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_QXL=m +CONFIG_DRM_BOCHS=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN=m +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +CONFIG_DRM_ANALOGIX_ANX78XX=m +CONFIG_DRM_ANALOGIX_DP=m +# end of Display Interface Bridges + +# CONFIG_DRM_ETNAVIV is not set +CONFIG_DRM_CIRRUS_QEMU=m +CONFIG_DRM_GM12U320=m +CONFIG_TINYDRM_HX8357D=m +CONFIG_TINYDRM_ILI9225=m +CONFIG_TINYDRM_ILI9341=m +CONFIG_TINYDRM_ILI9486=m +CONFIG_TINYDRM_MI0283QT=m +CONFIG_TINYDRM_REPAPER=m +CONFIG_TINYDRM_ST7586=m +CONFIG_TINYDRM_ST7735R=m +CONFIG_DRM_XEN=y +CONFIG_DRM_XEN_FRONTEND=m +CONFIG_DRM_VBOXVIDEO=m +# CONFIG_DRM_GUD is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_BOOT_VESA_SUPPORT=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=m +CONFIG_FB_SYS_COPYAREA=m +CONFIG_FB_SYS_IMAGEBLIT=m +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=m +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ARC is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_VGA16 is not set +CONFIG_FB_UVESA=m +CONFIG_FB_VESA=y +CONFIG_FB_EFI=y +# CONFIG_FB_N411 is not set +# CONFIG_FB_HGA is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_LE80578 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_VIA is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# 
CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SM501 is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +CONFIG_XEN_FBDEV_FRONTEND=m +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +CONFIG_FB_HYPERV=m +CONFIG_FB_SIMPLE=y +# CONFIG_FB_SM712 is not set +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +CONFIG_LCD_CLASS_DEVICE=m +CONFIG_LCD_L4F00242T03=m +CONFIG_LCD_LMS283GF05=m +CONFIG_LCD_LTV350QV=m +CONFIG_LCD_ILI922X=m +CONFIG_LCD_ILI9320=m +CONFIG_LCD_TDO24M=m +CONFIG_LCD_VGG2432A4=m +CONFIG_LCD_PLATFORM=m +CONFIG_LCD_AMS369FG06=m +CONFIG_LCD_LMS501KF03=m +CONFIG_LCD_HX8357=m +CONFIG_LCD_OTM3225A=m +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_KTD253=m +CONFIG_BACKLIGHT_LM3533=m +CONFIG_BACKLIGHT_PWM=m +CONFIG_BACKLIGHT_DA903X=m +CONFIG_BACKLIGHT_DA9052=m +CONFIG_BACKLIGHT_MAX8925=m +CONFIG_BACKLIGHT_APPLE=m +CONFIG_BACKLIGHT_QCOM_WLED=m +CONFIG_BACKLIGHT_SAHARA=m +CONFIG_BACKLIGHT_WM831X=m +CONFIG_BACKLIGHT_ADP5520=m +CONFIG_BACKLIGHT_ADP8860=m +CONFIG_BACKLIGHT_ADP8870=m +CONFIG_BACKLIGHT_88PM860X=m +CONFIG_BACKLIGHT_PCF50633=m +CONFIG_BACKLIGHT_AAT2870=m +CONFIG_BACKLIGHT_LM3630A=m +CONFIG_BACKLIGHT_LM3639=m +CONFIG_BACKLIGHT_LP855X=m +CONFIG_BACKLIGHT_LP8788=m +CONFIG_BACKLIGHT_PANDORA=m +CONFIG_BACKLIGHT_SKY81452=m +CONFIG_BACKLIGHT_AS3711=m +CONFIG_BACKLIGHT_GPIO=m +CONFIG_BACKLIGHT_LV5207LP=m +CONFIG_BACKLIGHT_BD6107=m +CONFIG_BACKLIGHT_ARCXCNN=m +CONFIG_BACKLIGHT_RAVE_SP=m +# end of Backlight & LCD device support + +CONFIG_VIDEOMODE_HELPERS=y +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y +# end of Console display driver support + +# CONFIG_LOGO is not set +# end of Graphics support + +CONFIG_SOUND=m +CONFIG_SOUND_OSS_CORE=y +# CONFIG_SOUND_OSS_CORE_PRECLAIM is not set +CONFIG_SND=m +CONFIG_SND_TIMER=m +CONFIG_SND_PCM=m +CONFIG_SND_PCM_ELD=y +CONFIG_SND_PCM_IEC958=y +CONFIG_SND_DMAENGINE_PCM=m +CONFIG_SND_HWDEP=m +CONFIG_SND_SEQ_DEVICE=m +CONFIG_SND_RAWMIDI=m +CONFIG_SND_COMPRESS_OFFLOAD=m +CONFIG_SND_JACK=y +CONFIG_SND_JACK_INPUT_DEV=y +CONFIG_SND_OSSEMUL=y +CONFIG_SND_MIXER_OSS=m +CONFIG_SND_PCM_OSS=m +CONFIG_SND_PCM_OSS_PLUGINS=y +CONFIG_SND_PCM_TIMER=y +CONFIG_SND_HRTIMER=m +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_MAX_CARDS=32 +# CONFIG_SND_SUPPORT_OLD_API is not set +CONFIG_SND_PROC_FS=y +CONFIG_SND_VERBOSE_PROCFS=y +CONFIG_SND_VERBOSE_PRINTK=y +CONFIG_SND_DEBUG=y +# CONFIG_SND_DEBUG_VERBOSE is not set +# CONFIG_SND_PCM_XRUN_DEBUG is not set +# CONFIG_SND_CTL_VALIDATION is not set +# CONFIG_SND_JACK_INJECTION_DEBUG is not set +CONFIG_SND_VMASTER=y +CONFIG_SND_DMA_SGBUF=y +CONFIG_SND_CTL_LED=m +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y +CONFIG_SND_SEQ_MIDI_EVENT=m +CONFIG_SND_SEQ_MIDI=m +CONFIG_SND_SEQ_MIDI_EMUL=m +CONFIG_SND_SEQ_VIRMIDI=m +CONFIG_SND_MPU401_UART=m +CONFIG_SND_OPL3_LIB=m +CONFIG_SND_OPL3_LIB_SEQ=m +CONFIG_SND_VX_LIB=m +CONFIG_SND_AC97_CODEC=m +CONFIG_SND_DRIVERS=y +# CONFIG_SND_PCSP is not set +CONFIG_SND_DUMMY=m +CONFIG_SND_ALOOP=m +CONFIG_SND_VIRMIDI=m +CONFIG_SND_MTPAV=m +CONFIG_SND_MTS64=m +CONFIG_SND_SERIAL_U16550=m +CONFIG_SND_MPU401=m 
+CONFIG_SND_PORTMAN2X4=m +CONFIG_SND_AC97_POWER_SAVE=y +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0 +CONFIG_SND_SB_COMMON=m +CONFIG_SND_PCI=y +CONFIG_SND_AD1889=m +CONFIG_SND_ALS300=m +CONFIG_SND_ALS4000=m +CONFIG_SND_ALI5451=m +CONFIG_SND_ASIHPI=m +CONFIG_SND_ATIIXP=m +CONFIG_SND_ATIIXP_MODEM=m +CONFIG_SND_AU8810=m +CONFIG_SND_AU8820=m +CONFIG_SND_AU8830=m +CONFIG_SND_AW2=m +CONFIG_SND_AZT3328=m +CONFIG_SND_BT87X=m +# CONFIG_SND_BT87X_OVERCLOCK is not set +CONFIG_SND_CA0106=m +CONFIG_SND_CMIPCI=m +CONFIG_SND_OXYGEN_LIB=m +CONFIG_SND_OXYGEN=m +CONFIG_SND_CS4281=m +CONFIG_SND_CS46XX=m +CONFIG_SND_CS46XX_NEW_DSP=y +CONFIG_SND_CTXFI=m +CONFIG_SND_DARLA20=m +CONFIG_SND_GINA20=m +CONFIG_SND_LAYLA20=m +CONFIG_SND_DARLA24=m +CONFIG_SND_GINA24=m +CONFIG_SND_LAYLA24=m +CONFIG_SND_MONA=m +CONFIG_SND_MIA=m +CONFIG_SND_ECHO3G=m +CONFIG_SND_INDIGO=m +CONFIG_SND_INDIGOIO=m +CONFIG_SND_INDIGODJ=m +CONFIG_SND_INDIGOIOX=m +CONFIG_SND_INDIGODJX=m +CONFIG_SND_EMU10K1=m +CONFIG_SND_EMU10K1_SEQ=m +CONFIG_SND_EMU10K1X=m +CONFIG_SND_ENS1370=m +CONFIG_SND_ENS1371=m +CONFIG_SND_ES1938=m +CONFIG_SND_ES1968=m +CONFIG_SND_ES1968_INPUT=y +CONFIG_SND_ES1968_RADIO=y +CONFIG_SND_FM801=m +CONFIG_SND_FM801_TEA575X_BOOL=y +CONFIG_SND_HDSP=m +CONFIG_SND_HDSPM=m +CONFIG_SND_ICE1712=m +CONFIG_SND_ICE1724=m +CONFIG_SND_INTEL8X0=m +CONFIG_SND_INTEL8X0M=m +CONFIG_SND_KORG1212=m +CONFIG_SND_LOLA=m +CONFIG_SND_LX6464ES=m +CONFIG_SND_MAESTRO3=m +CONFIG_SND_MAESTRO3_INPUT=y +CONFIG_SND_MIXART=m +CONFIG_SND_NM256=m +CONFIG_SND_PCXHR=m +CONFIG_SND_RIPTIDE=m +CONFIG_SND_RME32=m +CONFIG_SND_RME96=m +CONFIG_SND_RME9652=m +CONFIG_SND_SONICVIBES=m +CONFIG_SND_TRIDENT=m +CONFIG_SND_VIA82XX=m +CONFIG_SND_VIA82XX_MODEM=m +CONFIG_SND_VIRTUOSO=m +CONFIG_SND_VX222=m +CONFIG_SND_YMFPCI=m + +# +# HD-Audio +# +CONFIG_SND_HDA=m +CONFIG_SND_HDA_GENERIC_LEDS=y +CONFIG_SND_HDA_INTEL=m +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_RECONFIG=y +CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_INPUT_BEEP_MODE=0 +CONFIG_SND_HDA_PATCH_LOADER=y +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_HDA_CODEC_ANALOG=m +CONFIG_SND_HDA_CODEC_SIGMATEL=m +CONFIG_SND_HDA_CODEC_VIA=m +CONFIG_SND_HDA_CODEC_HDMI=m +CONFIG_SND_HDA_CODEC_CIRRUS=m +CONFIG_SND_HDA_CODEC_CONEXANT=m +CONFIG_SND_HDA_CODEC_CA0110=m +CONFIG_SND_HDA_CODEC_CA0132=m +CONFIG_SND_HDA_CODEC_CA0132_DSP=y +CONFIG_SND_HDA_CODEC_CMEDIA=m +CONFIG_SND_HDA_CODEC_SI3054=m +CONFIG_SND_HDA_GENERIC=m +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=1 +CONFIG_SND_HDA_INTEL_HDMI_SILENT_STREAM=y +# end of HD-Audio + +CONFIG_SND_HDA_CORE=m +CONFIG_SND_HDA_DSP_LOADER=y +CONFIG_SND_HDA_COMPONENT=y +CONFIG_SND_HDA_I915=y +CONFIG_SND_HDA_EXT_CORE=m +CONFIG_SND_HDA_PREALLOC_SIZE=0 +CONFIG_SND_INTEL_NHLT=y +CONFIG_SND_INTEL_DSP_CONFIG=m +CONFIG_SND_INTEL_SOUNDWIRE_ACPI=m +CONFIG_SND_INTEL_BYT_PREFER_SOF=y +CONFIG_SND_SPI=y +CONFIG_SND_USB=y +CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_USX2Y=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_US122L=m +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_USB_HIFACE=m +CONFIG_SND_BCD2000=m +CONFIG_SND_USB_LINE6=m +CONFIG_SND_USB_POD=m +CONFIG_SND_USB_PODHD=m +CONFIG_SND_USB_TONEPORT=m +CONFIG_SND_USB_VARIAX=m +CONFIG_SND_FIREWIRE=y +CONFIG_SND_FIREWIRE_LIB=m +CONFIG_SND_DICE=m +CONFIG_SND_OXFW=m +CONFIG_SND_ISIGHT=m +CONFIG_SND_FIREWORKS=m +CONFIG_SND_BEBOB=m +CONFIG_SND_FIREWIRE_DIGI00X=m +CONFIG_SND_FIREWIRE_TASCAM=m +CONFIG_SND_FIREWIRE_MOTU=m +CONFIG_SND_FIREFACE=m +CONFIG_SND_PCMCIA=y +CONFIG_SND_VXPOCKET=m +CONFIG_SND_PDAUDIOCF=m 
+CONFIG_SND_SOC=m +CONFIG_SND_SOC_AC97_BUS=y +CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y +CONFIG_SND_SOC_COMPRESS=y +CONFIG_SND_SOC_TOPOLOGY=y +CONFIG_SND_SOC_ACPI=m +CONFIG_SND_SOC_ADI=m +CONFIG_SND_SOC_ADI_AXI_I2S=m +CONFIG_SND_SOC_ADI_AXI_SPDIF=m +CONFIG_SND_SOC_AMD_ACP=m +CONFIG_SND_SOC_AMD_CZ_DA7219MX98357_MACH=m +CONFIG_SND_SOC_AMD_CZ_RT5645_MACH=m +CONFIG_SND_SOC_AMD_ACP3x=m +CONFIG_SND_SOC_AMD_RV_RT5682_MACH=m +CONFIG_SND_SOC_AMD_RENOIR=m +CONFIG_SND_SOC_AMD_RENOIR_MACH=m +CONFIG_SND_ATMEL_SOC=m +# CONFIG_SND_BCM63XX_I2S_WHISTLER is not set +CONFIG_SND_DESIGNWARE_I2S=m +CONFIG_SND_DESIGNWARE_PCM=y + +# +# SoC Audio for Freescale CPUs +# + +# +# Common SoC Audio options for Freescale CPUs: +# +# CONFIG_SND_SOC_FSL_ASRC is not set +# CONFIG_SND_SOC_FSL_SAI is not set +# CONFIG_SND_SOC_FSL_AUDMIX is not set +# CONFIG_SND_SOC_FSL_SSI is not set +# CONFIG_SND_SOC_FSL_SPDIF is not set +# CONFIG_SND_SOC_FSL_ESAI is not set +# CONFIG_SND_SOC_FSL_MICFIL is not set +CONFIG_SND_SOC_FSL_XCVR=m +# CONFIG_SND_SOC_FSL_RPMSG is not set +# CONFIG_SND_SOC_IMX_AUDMUX is not set +# end of SoC Audio for Freescale CPUs + +CONFIG_SND_I2S_HI6210_I2S=m +CONFIG_SND_SOC_IMG=y +CONFIG_SND_SOC_IMG_I2S_IN=m +CONFIG_SND_SOC_IMG_I2S_OUT=m +CONFIG_SND_SOC_IMG_PARALLEL_OUT=m +CONFIG_SND_SOC_IMG_SPDIF_IN=m +CONFIG_SND_SOC_IMG_SPDIF_OUT=m +CONFIG_SND_SOC_IMG_PISTACHIO_INTERNAL_DAC=m +CONFIG_SND_SOC_INTEL_SST_TOPLEVEL=y +CONFIG_SND_SOC_INTEL_SST=m +CONFIG_SND_SOC_INTEL_CATPT=m +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM=m +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_PCI=m +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_ACPI=m +CONFIG_SND_SOC_INTEL_SKYLAKE=m +CONFIG_SND_SOC_INTEL_SKL=m +CONFIG_SND_SOC_INTEL_APL=m +CONFIG_SND_SOC_INTEL_KBL=m +CONFIG_SND_SOC_INTEL_GLK=m +CONFIG_SND_SOC_INTEL_CNL=m +CONFIG_SND_SOC_INTEL_CFL=m +CONFIG_SND_SOC_INTEL_CML_H=m +CONFIG_SND_SOC_INTEL_CML_LP=m +CONFIG_SND_SOC_INTEL_SKYLAKE_FAMILY=m +CONFIG_SND_SOC_INTEL_SKYLAKE_SSP_CLK=m +CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC=y +CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON=m +CONFIG_SND_SOC_ACPI_INTEL_MATCH=m +CONFIG_SND_SOC_INTEL_MACH=y +CONFIG_SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES=y +CONFIG_SND_SOC_INTEL_HASWELL_MACH=m +CONFIG_SND_SOC_INTEL_BDW_RT5650_MACH=m +CONFIG_SND_SOC_INTEL_BDW_RT5677_MACH=m +CONFIG_SND_SOC_INTEL_BROADWELL_MACH=m +CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH=m +CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH=m +CONFIG_SND_SOC_INTEL_BYTCR_WM5102_MACH=m +CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH=m +CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH=m +CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH=m +CONFIG_SND_SOC_INTEL_CHT_BSW_NAU8824_MACH=m +CONFIG_SND_SOC_INTEL_BYT_CHT_CX2072X_MACH=m +CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH=m +CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH=m +# CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH is not set +CONFIG_SND_SOC_INTEL_SKL_RT286_MACH=m +CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH=m +CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH=m +CONFIG_SND_SOC_INTEL_DA7219_MAX98357A_GENERIC=m +CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON=m +CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH=m +CONFIG_SND_SOC_INTEL_BXT_RT298_MACH=m +CONFIG_SND_SOC_INTEL_SOF_WM8804_MACH=m +CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH=m +CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH=m +CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH=m +CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH=m +CONFIG_SND_SOC_INTEL_KBL_RT5660_MACH=m +CONFIG_SND_SOC_INTEL_GLK_DA7219_MAX98357A_MACH=m +CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH=m +CONFIG_SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH=m 
+CONFIG_SND_SOC_INTEL_SOF_RT5682_MACH=m +CONFIG_SND_SOC_INTEL_SOF_PCM512x_MACH=m +CONFIG_SND_SOC_INTEL_CML_LP_DA7219_MAX98357A_MACH=m +CONFIG_SND_SOC_INTEL_SOF_CML_RT1011_RT5682_MACH=m +CONFIG_SND_SOC_INTEL_SOF_DA7219_MAX98373_MACH=m +CONFIG_SND_SOC_INTEL_EHL_RT5660_MACH=m +CONFIG_SND_SOC_INTEL_SOUNDWIRE_SOF_MACH=m +CONFIG_SND_SOC_MTK_BTCVSD=m +CONFIG_SND_SOC_SOF_TOPLEVEL=y +CONFIG_SND_SOC_SOF_PCI_DEV=m +CONFIG_SND_SOC_SOF_PCI=m +CONFIG_SND_SOC_SOF_ACPI=m +CONFIG_SND_SOC_SOF_ACPI_DEV=m +# CONFIG_SND_SOC_SOF_DEBUG_PROBES is not set +CONFIG_SND_SOC_SOF=m +CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE=y +CONFIG_SND_SOC_SOF_INTEL_TOPLEVEL=y +CONFIG_SND_SOC_SOF_INTEL_HIFI_EP_IPC=m +CONFIG_SND_SOC_SOF_INTEL_ATOM_HIFI_EP=m +CONFIG_SND_SOC_SOF_INTEL_COMMON=m +CONFIG_SND_SOC_SOF_BAYTRAIL=m +# CONFIG_SND_SOC_SOF_BROADWELL is not set +CONFIG_SND_SOC_SOF_MERRIFIELD=m +CONFIG_SND_SOC_SOF_INTEL_APL=m +CONFIG_SND_SOC_SOF_APOLLOLAKE=m +CONFIG_SND_SOC_SOF_GEMINILAKE=m +CONFIG_SND_SOC_SOF_INTEL_CNL=m +CONFIG_SND_SOC_SOF_CANNONLAKE=m +CONFIG_SND_SOC_SOF_COFFEELAKE=m +CONFIG_SND_SOC_SOF_COMETLAKE=m +CONFIG_SND_SOC_SOF_INTEL_ICL=m +CONFIG_SND_SOC_SOF_ICELAKE=m +CONFIG_SND_SOC_SOF_JASPERLAKE=m +CONFIG_SND_SOC_SOF_INTEL_TGL=m +CONFIG_SND_SOC_SOF_TIGERLAKE=m +CONFIG_SND_SOC_SOF_ELKHARTLAKE=m +CONFIG_SND_SOC_SOF_ALDERLAKE=m +CONFIG_SND_SOC_SOF_HDA_COMMON=m +CONFIG_SND_SOC_SOF_HDA_LINK=y +CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC=y +# CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1 is not set +CONFIG_SND_SOC_SOF_HDA_LINK_BASELINE=m +CONFIG_SND_SOC_SOF_HDA=m +CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE=m +CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE=m +CONFIG_SND_SOC_SOF_XTENSA=m + +# +# STMicroelectronics STM32 SOC audio support +# +# end of STMicroelectronics STM32 SOC audio support + +CONFIG_SND_SOC_XILINX_I2S=m +CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER=m +CONFIG_SND_SOC_XILINX_SPDIF=m +CONFIG_SND_SOC_XTFPGA_I2S=m +CONFIG_SND_SOC_I2C_AND_SPI=m + +# +# CODEC drivers +# +CONFIG_SND_SOC_ARIZONA=m +CONFIG_SND_SOC_WM_ADSP=m +CONFIG_SND_SOC_AC97_CODEC=m +CONFIG_SND_SOC_ADAU_UTILS=m +CONFIG_SND_SOC_ADAU1372=m +CONFIG_SND_SOC_ADAU1372_I2C=m +CONFIG_SND_SOC_ADAU1372_SPI=m +CONFIG_SND_SOC_ADAU1701=m +CONFIG_SND_SOC_ADAU17X1=m +CONFIG_SND_SOC_ADAU1761=m +CONFIG_SND_SOC_ADAU1761_I2C=m +CONFIG_SND_SOC_ADAU1761_SPI=m +CONFIG_SND_SOC_ADAU7002=m +CONFIG_SND_SOC_ADAU7118=m +CONFIG_SND_SOC_ADAU7118_HW=m +CONFIG_SND_SOC_ADAU7118_I2C=m +CONFIG_SND_SOC_AK4104=m +CONFIG_SND_SOC_AK4118=m +CONFIG_SND_SOC_AK4458=m +CONFIG_SND_SOC_AK4554=m +CONFIG_SND_SOC_AK4613=m +CONFIG_SND_SOC_AK4642=m +CONFIG_SND_SOC_AK5386=m +CONFIG_SND_SOC_AK5558=m +CONFIG_SND_SOC_ALC5623=m +CONFIG_SND_SOC_BD28623=m +# CONFIG_SND_SOC_BT_SCO is not set +CONFIG_SND_SOC_CROS_EC_CODEC=m +CONFIG_SND_SOC_CS35L32=m +CONFIG_SND_SOC_CS35L33=m +CONFIG_SND_SOC_CS35L34=m +CONFIG_SND_SOC_CS35L35=m +CONFIG_SND_SOC_CS35L36=m +CONFIG_SND_SOC_CS42L42=m +CONFIG_SND_SOC_CS42L51=m +CONFIG_SND_SOC_CS42L51_I2C=m +CONFIG_SND_SOC_CS42L52=m +CONFIG_SND_SOC_CS42L56=m +CONFIG_SND_SOC_CS42L73=m +CONFIG_SND_SOC_CS4234=m +CONFIG_SND_SOC_CS4265=m +CONFIG_SND_SOC_CS4270=m +CONFIG_SND_SOC_CS4271=m +CONFIG_SND_SOC_CS4271_I2C=m +CONFIG_SND_SOC_CS4271_SPI=m +CONFIG_SND_SOC_CS42XX8=m +CONFIG_SND_SOC_CS42XX8_I2C=m +CONFIG_SND_SOC_CS43130=m +CONFIG_SND_SOC_CS4341=m +CONFIG_SND_SOC_CS4349=m +CONFIG_SND_SOC_CS53L30=m +CONFIG_SND_SOC_CX2072X=m +CONFIG_SND_SOC_DA7213=m +CONFIG_SND_SOC_DA7219=m +CONFIG_SND_SOC_DMIC=m +CONFIG_SND_SOC_HDMI_CODEC=m +CONFIG_SND_SOC_ES7134=m +CONFIG_SND_SOC_ES7241=m +CONFIG_SND_SOC_ES8316=m 
+CONFIG_SND_SOC_ES8328=m +CONFIG_SND_SOC_ES8328_I2C=m +CONFIG_SND_SOC_ES8328_SPI=m +CONFIG_SND_SOC_GTM601=m +CONFIG_SND_SOC_HDAC_HDMI=m +CONFIG_SND_SOC_HDAC_HDA=m +CONFIG_SND_SOC_INNO_RK3036=m +CONFIG_SND_SOC_MAX98088=m +CONFIG_SND_SOC_MAX98090=m +CONFIG_SND_SOC_MAX98357A=m +CONFIG_SND_SOC_MAX98504=m +CONFIG_SND_SOC_MAX9867=m +CONFIG_SND_SOC_MAX98927=m +CONFIG_SND_SOC_MAX98373=m +CONFIG_SND_SOC_MAX98373_I2C=m +CONFIG_SND_SOC_MAX98373_SDW=m +CONFIG_SND_SOC_MAX98390=m +CONFIG_SND_SOC_MAX9860=m +CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=m +CONFIG_SND_SOC_PCM1681=m +CONFIG_SND_SOC_PCM1789=m +CONFIG_SND_SOC_PCM1789_I2C=m +CONFIG_SND_SOC_PCM179X=m +CONFIG_SND_SOC_PCM179X_I2C=m +CONFIG_SND_SOC_PCM179X_SPI=m +CONFIG_SND_SOC_PCM186X=m +CONFIG_SND_SOC_PCM186X_I2C=m +CONFIG_SND_SOC_PCM186X_SPI=m +CONFIG_SND_SOC_PCM3060=m +CONFIG_SND_SOC_PCM3060_I2C=m +CONFIG_SND_SOC_PCM3060_SPI=m +CONFIG_SND_SOC_PCM3168A=m +CONFIG_SND_SOC_PCM3168A_I2C=m +CONFIG_SND_SOC_PCM3168A_SPI=m +CONFIG_SND_SOC_PCM5102A=m +CONFIG_SND_SOC_PCM512x=m +CONFIG_SND_SOC_PCM512x_I2C=m +CONFIG_SND_SOC_PCM512x_SPI=m +CONFIG_SND_SOC_RK3328=m +CONFIG_SND_SOC_RL6231=m +CONFIG_SND_SOC_RL6347A=m +CONFIG_SND_SOC_RT286=m +CONFIG_SND_SOC_RT298=m +CONFIG_SND_SOC_RT1011=m +CONFIG_SND_SOC_RT1015=m +CONFIG_SND_SOC_RT1015P=m +CONFIG_SND_SOC_RT1308=m +CONFIG_SND_SOC_RT1308_SDW=m +CONFIG_SND_SOC_RT1316_SDW=m +CONFIG_SND_SOC_RT5514=m +CONFIG_SND_SOC_RT5514_SPI=m +CONFIG_SND_SOC_RT5616=m +CONFIG_SND_SOC_RT5631=m +CONFIG_SND_SOC_RT5640=m +CONFIG_SND_SOC_RT5645=m +CONFIG_SND_SOC_RT5651=m +CONFIG_SND_SOC_RT5659=m +CONFIG_SND_SOC_RT5660=m +CONFIG_SND_SOC_RT5663=m +CONFIG_SND_SOC_RT5670=m +CONFIG_SND_SOC_RT5677=m +CONFIG_SND_SOC_RT5677_SPI=m +CONFIG_SND_SOC_RT5682=m +CONFIG_SND_SOC_RT5682_I2C=m +CONFIG_SND_SOC_RT5682_SDW=m +CONFIG_SND_SOC_RT700=m +CONFIG_SND_SOC_RT700_SDW=m +CONFIG_SND_SOC_RT711=m +CONFIG_SND_SOC_RT711_SDW=m +CONFIG_SND_SOC_RT711_SDCA_SDW=m +CONFIG_SND_SOC_RT715=m +CONFIG_SND_SOC_RT715_SDW=m +CONFIG_SND_SOC_RT715_SDCA_SDW=m +CONFIG_SND_SOC_SGTL5000=m +CONFIG_SND_SOC_SI476X=m +CONFIG_SND_SOC_SIGMADSP=m +CONFIG_SND_SOC_SIGMADSP_I2C=m +CONFIG_SND_SOC_SIGMADSP_REGMAP=m +CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m +CONFIG_SND_SOC_SIMPLE_MUX=m +CONFIG_SND_SOC_SPDIF=m +CONFIG_SND_SOC_SSM2305=m +CONFIG_SND_SOC_SSM2602=m +CONFIG_SND_SOC_SSM2602_SPI=m +CONFIG_SND_SOC_SSM2602_I2C=m +CONFIG_SND_SOC_SSM4567=m +CONFIG_SND_SOC_STA32X=m +CONFIG_SND_SOC_STA350=m +CONFIG_SND_SOC_STI_SAS=m +CONFIG_SND_SOC_TAS2552=m +CONFIG_SND_SOC_TAS2562=m +CONFIG_SND_SOC_TAS2764=m +CONFIG_SND_SOC_TAS2770=m +CONFIG_SND_SOC_TAS5086=m +CONFIG_SND_SOC_TAS571X=m +CONFIG_SND_SOC_TAS5720=m +CONFIG_SND_SOC_TAS6424=m +CONFIG_SND_SOC_TDA7419=m +CONFIG_SND_SOC_TFA9879=m +CONFIG_SND_SOC_TLV320AIC23=m +CONFIG_SND_SOC_TLV320AIC23_I2C=m +CONFIG_SND_SOC_TLV320AIC23_SPI=m +CONFIG_SND_SOC_TLV320AIC31XX=m +CONFIG_SND_SOC_TLV320AIC32X4=m +CONFIG_SND_SOC_TLV320AIC32X4_I2C=m +CONFIG_SND_SOC_TLV320AIC32X4_SPI=m +# CONFIG_SND_SOC_TLV320AIC3X_I2C is not set +# CONFIG_SND_SOC_TLV320AIC3X_SPI is not set +CONFIG_SND_SOC_TLV320ADCX140=m +CONFIG_SND_SOC_TS3A227E=m +CONFIG_SND_SOC_TSCS42XX=m +CONFIG_SND_SOC_TSCS454=m +CONFIG_SND_SOC_UDA1334=m +CONFIG_SND_SOC_WCD9335=m +CONFIG_SND_SOC_WCD934X=m +CONFIG_SND_SOC_WM5102=m +CONFIG_SND_SOC_WM8510=m +CONFIG_SND_SOC_WM8523=m +CONFIG_SND_SOC_WM8524=m +CONFIG_SND_SOC_WM8580=m +CONFIG_SND_SOC_WM8711=m +CONFIG_SND_SOC_WM8728=m +CONFIG_SND_SOC_WM8731=m +CONFIG_SND_SOC_WM8737=m +CONFIG_SND_SOC_WM8741=m +CONFIG_SND_SOC_WM8750=m +CONFIG_SND_SOC_WM8753=m +CONFIG_SND_SOC_WM8770=m 
+CONFIG_SND_SOC_WM8776=m +CONFIG_SND_SOC_WM8782=m +CONFIG_SND_SOC_WM8804=m +CONFIG_SND_SOC_WM8804_I2C=m +CONFIG_SND_SOC_WM8804_SPI=m +CONFIG_SND_SOC_WM8903=m +CONFIG_SND_SOC_WM8904=m +CONFIG_SND_SOC_WM8960=m +CONFIG_SND_SOC_WM8962=m +CONFIG_SND_SOC_WM8974=m +CONFIG_SND_SOC_WM8978=m +CONFIG_SND_SOC_WM8985=m +CONFIG_SND_SOC_WSA881X=m +CONFIG_SND_SOC_ZL38060=m +CONFIG_SND_SOC_ZX_AUD96P22=m +CONFIG_SND_SOC_MAX9759=m +CONFIG_SND_SOC_MT6351=m +CONFIG_SND_SOC_MT6358=m +CONFIG_SND_SOC_MT6660=m +CONFIG_SND_SOC_NAU8315=m +CONFIG_SND_SOC_NAU8540=m +CONFIG_SND_SOC_NAU8810=m +CONFIG_SND_SOC_NAU8822=m +CONFIG_SND_SOC_NAU8824=m +CONFIG_SND_SOC_NAU8825=m +CONFIG_SND_SOC_TPA6130A2=m +CONFIG_SND_SOC_LPASS_WSA_MACRO=m +CONFIG_SND_SOC_LPASS_VA_MACRO=m +CONFIG_SND_SOC_LPASS_RX_MACRO=m +CONFIG_SND_SOC_LPASS_TX_MACRO=m +# end of CODEC drivers + +CONFIG_SND_SIMPLE_CARD_UTILS=m +CONFIG_SND_SIMPLE_CARD=m +CONFIG_SND_X86=y +CONFIG_HDMI_LPE_AUDIO=m +CONFIG_SND_SYNTH_EMUX=m +CONFIG_SND_XEN_FRONTEND=m +# CONFIG_SND_VIRTIO is not set +CONFIG_AC97_BUS=m + +# +# HID support +# +CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=m +CONFIG_HID_ACCUTOUCH=m +CONFIG_HID_ACRUX=m +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +CONFIG_HID_BIGBEN_FF=m +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +CONFIG_HID_COUGAR=m +CONFIG_HID_MACALLY=m +CONFIG_HID_PRODIKEYS=m +CONFIG_HID_CMEDIA=m +CONFIG_HID_CP2112=m +CONFIG_HID_CREATIVE_SB0540=m +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=m +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=m +# CONFIG_HID_FT260 is not set +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +CONFIG_HID_GLORIOUS=m +CONFIG_HID_HOLTEK=m +CONFIG_HOLTEK_FF=y +CONFIG_HID_GOOGLE_HAMMER=m +CONFIG_HID_VIVALDI=m +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +CONFIG_HID_VIEWSONIC=m +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_LOGIWHEELS_FF=y +CONFIG_HID_MAGICMOUSE=m +CONFIG_HID_MALTRON=m +CONFIG_HID_MAYFLASH=m +CONFIG_HID_REDRAGON=m +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=m +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PICOLCD_CIR=y +CONFIG_HID_PLANTRONICS=m +CONFIG_HID_PLAYSTATION=m +CONFIG_PLAYSTATION_FF=y +CONFIG_HID_PRIMAX=m +CONFIG_HID_RETRODE=m +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +CONFIG_HID_STEAM=m +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +CONFIG_GREENASIA_FF=y +CONFIG_HID_HYPERV_MOUSE=m +CONFIG_HID_SMARTJOYPLUS=m +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +CONFIG_THRUSTMASTER_FF=y 
+CONFIG_HID_UDRAW_PS3=m +CONFIG_HID_U2FZERO=m +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +CONFIG_ZEROPLUS_FF=y +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=m +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m +CONFIG_HID_MCP2221=m +# end of Special HID drivers + +# +# USB HID support +# +CONFIG_USB_HID=m +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +# end of USB HID support + +# +# I2C HID support +# +CONFIG_I2C_HID_ACPI=m +# end of I2C HID support + +CONFIG_I2C_HID_CORE=m + +# +# Intel ISH HID support +# +CONFIG_INTEL_ISH_HID=m +CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER=m +# end of Intel ISH HID support + +# +# AMD SFH HID Support +# +CONFIG_AMD_SFH_HID=m +# end of AMD SFH HID Support + +# +# Surface System Aggregator Module HID support +# +# CONFIG_SURFACE_KBD is not set +# end of Surface System Aggregator Module HID support +# end of HID support + +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_LED_TRIG=y +CONFIG_USB_ULPI_BUS=m +CONFIG_USB_CONN_GPIO=m +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_FEW_INIT_RETRIES is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_PRODUCTLIST is not set +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_AUTOSUSPEND_DELAY=2 +CONFIG_USB_MON=m + +# +# USB Host Controller Drivers +# +CONFIG_USB_C67X00_HCD=m +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=m +CONFIG_USB_XHCI_PCI_RENESAS=m +CONFIG_USB_XHCI_PLATFORM=m +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +CONFIG_USB_EHCI_FSL=m +CONFIG_USB_EHCI_HCD_PLATFORM=m +CONFIG_USB_OXU210HP_HCD=m +CONFIG_USB_ISP116X_HCD=m +CONFIG_USB_FOTG210_HCD=m +CONFIG_USB_MAX3421_HCD=m +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +CONFIG_USB_OHCI_HCD_PLATFORM=m +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_U132_HCD=m +CONFIG_USB_SL811_HCD=m +# CONFIG_USB_SL811_HCD_ISO is not set +CONFIG_USB_SL811_CS=m +CONFIG_USB_R8A66597_HCD=m +CONFIG_USB_HCD_BCMA=m +CONFIG_USB_HCD_SSB=m +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +CONFIG_USBIP_CORE=m +CONFIG_USBIP_VHCI_HCD=m +CONFIG_USBIP_VHCI_HC_PORTS=8 +CONFIG_USBIP_VHCI_NR_HCS=1 +CONFIG_USBIP_HOST=m +CONFIG_USBIP_VUDC=m +# CONFIG_USBIP_DEBUG is not set +CONFIG_USB_CDNS_SUPPORT=m +CONFIG_USB_CDNS_HOST=y +CONFIG_USB_CDNS3=m +CONFIG_USB_CDNS3_GADGET=y +CONFIG_USB_CDNS3_HOST=y +CONFIG_USB_CDNS3_PCI_WRAP=m +CONFIG_USB_CDNSP_PCI=m +CONFIG_USB_CDNSP_GADGET=y +CONFIG_USB_CDNSP_HOST=y +CONFIG_USB_MUSB_HDRC=m +# CONFIG_USB_MUSB_HOST is not set +# CONFIG_USB_MUSB_GADGET is not set 
+CONFIG_USB_MUSB_DUAL_ROLE=y + +# +# Platform Glue Layer +# + +# +# MUSB DMA mode +# +# CONFIG_MUSB_PIO_ONLY is not set +CONFIG_USB_DWC3=m +CONFIG_USB_DWC3_ULPI=y +# CONFIG_USB_DWC3_HOST is not set +# CONFIG_USB_DWC3_GADGET is not set +CONFIG_USB_DWC3_DUAL_ROLE=y + +# +# Platform Glue Driver Support +# +CONFIG_USB_DWC3_PCI=m +CONFIG_USB_DWC3_HAPS=m +CONFIG_USB_DWC2=m +# CONFIG_USB_DWC2_HOST is not set + +# +# Gadget/Dual-role mode requires USB Gadget support to be enabled +# +# CONFIG_USB_DWC2_PERIPHERAL is not set +CONFIG_USB_DWC2_DUAL_ROLE=y +CONFIG_USB_DWC2_PCI=m +# CONFIG_USB_DWC2_DEBUG is not set +# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set +CONFIG_USB_CHIPIDEA=m +CONFIG_USB_CHIPIDEA_UDC=y +CONFIG_USB_CHIPIDEA_HOST=y +CONFIG_USB_CHIPIDEA_PCI=m +CONFIG_USB_CHIPIDEA_MSM=m +CONFIG_USB_CHIPIDEA_GENERIC=m +CONFIG_USB_ISP1760=m +CONFIG_USB_ISP1760_HCD=y +CONFIG_USB_ISP1761_UDC=y +# CONFIG_USB_ISP1760_HOST_ROLE is not set +# CONFIG_USB_ISP1760_GADGET_ROLE is not set +CONFIG_USB_ISP1760_DUAL_ROLE=y + +# +# USB port drivers +# +CONFIG_USB_USS720=m +CONFIG_USB_SERIAL=y +CONFIG_USB_SERIAL_CONSOLE=y +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_SIMPLE=m +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_F81232=m +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_METRO=m +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +# CONFIG_USB_SERIAL_SAFE_PADDED is not set +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +CONFIG_USB_SERIAL_WISHBONE=m +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +CONFIG_USB_SERIAL_XR=m +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +CONFIG_USB_CYPRESS_CY7C63=m +CONFIG_USB_CYTHERM=m +CONFIG_USB_IDMOUSE=m +CONFIG_USB_FTDI_ELAN=m +CONFIG_USB_APPLEDISPLAY=m +CONFIG_APPLE_MFI_FASTCHARGE=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +CONFIG_USB_TRANCEVIBRATOR=m +CONFIG_USB_IOWARRIOR=m +CONFIG_USB_TEST=m +CONFIG_USB_EHSET_TEST_FIXTURE=m +CONFIG_USB_ISIGHTFW=m +CONFIG_USB_YUREX=m +CONFIG_USB_EZUSB_FX2=m +CONFIG_USB_HUB_USB251XB=m +CONFIG_USB_HSIC_USB3503=m +CONFIG_USB_HSIC_USB4604=m +CONFIG_USB_LINK_LAYER_TEST=m +CONFIG_USB_CHAOSKEY=m +CONFIG_USB_ATM=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +CONFIG_USB_PHY=y 
+CONFIG_NOP_USB_XCEIV=m +CONFIG_USB_GPIO_VBUS=m +CONFIG_TAHVO_USB=m +# CONFIG_TAHVO_USB_HOST_BY_DEFAULT is not set +CONFIG_USB_ISP1301=m +# end of USB Physical Layer drivers + +CONFIG_USB_GADGET=m +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=2 +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 +CONFIG_U_SERIAL_CONSOLE=y + +# +# USB Peripheral Controller +# +CONFIG_USB_FOTG210_UDC=m +CONFIG_USB_GR_UDC=m +CONFIG_USB_R8A66597=m +CONFIG_USB_PXA27X=m +CONFIG_USB_MV_UDC=m +CONFIG_USB_MV_U3D=m +CONFIG_USB_SNP_CORE=m +CONFIG_USB_M66592=m +CONFIG_USB_BDC_UDC=m +CONFIG_USB_AMD5536UDC=m +CONFIG_USB_NET2272=m +# CONFIG_USB_NET2272_DMA is not set +CONFIG_USB_NET2280=m +CONFIG_USB_GOKU=m +CONFIG_USB_EG20T=m +CONFIG_USB_MAX3420_UDC=m +CONFIG_USB_DUMMY_HCD=m +# end of USB Peripheral Controller + +CONFIG_USB_LIBCOMPOSITE=m +CONFIG_USB_F_ACM=m +CONFIG_USB_F_SS_LB=m +CONFIG_USB_U_SERIAL=m +CONFIG_USB_U_ETHER=m +CONFIG_USB_U_AUDIO=m +CONFIG_USB_F_SERIAL=m +CONFIG_USB_F_OBEX=m +CONFIG_USB_F_NCM=m +CONFIG_USB_F_ECM=m +CONFIG_USB_F_PHONET=m +CONFIG_USB_F_EEM=m +CONFIG_USB_F_SUBSET=m +CONFIG_USB_F_RNDIS=m +CONFIG_USB_F_MASS_STORAGE=m +CONFIG_USB_F_FS=m +CONFIG_USB_F_UAC1=m +CONFIG_USB_F_UAC1_LEGACY=m +CONFIG_USB_F_UAC2=m +CONFIG_USB_F_UVC=m +CONFIG_USB_F_MIDI=m +CONFIG_USB_F_HID=m +CONFIG_USB_F_PRINTER=m +CONFIG_USB_F_TCM=m +CONFIG_USB_CONFIGFS=m +CONFIG_USB_CONFIGFS_SERIAL=y +CONFIG_USB_CONFIGFS_ACM=y +CONFIG_USB_CONFIGFS_OBEX=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_ECM=y +CONFIG_USB_CONFIGFS_ECM_SUBSET=y +CONFIG_USB_CONFIGFS_RNDIS=y +CONFIG_USB_CONFIGFS_EEM=y +CONFIG_USB_CONFIGFS_PHONET=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_LB_SS=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_UAC1=y +CONFIG_USB_CONFIGFS_F_UAC1_LEGACY=y +CONFIG_USB_CONFIGFS_F_UAC2=y +CONFIG_USB_CONFIGFS_F_MIDI=y +CONFIG_USB_CONFIGFS_F_HID=y +CONFIG_USB_CONFIGFS_F_UVC=y +CONFIG_USB_CONFIGFS_F_PRINTER=y +CONFIG_USB_CONFIGFS_F_TCM=y + +# +# USB Gadget precomposed configurations +# +CONFIG_USB_ZERO=m +CONFIG_USB_AUDIO=m +# CONFIG_GADGET_UAC1 is not set +CONFIG_USB_ETH=m +CONFIG_USB_ETH_RNDIS=y +CONFIG_USB_ETH_EEM=y +CONFIG_USB_G_NCM=m +CONFIG_USB_GADGETFS=m +CONFIG_USB_FUNCTIONFS=m +CONFIG_USB_FUNCTIONFS_ETH=y +CONFIG_USB_FUNCTIONFS_RNDIS=y +CONFIG_USB_FUNCTIONFS_GENERIC=y +CONFIG_USB_MASS_STORAGE=m +CONFIG_USB_GADGET_TARGET=m +CONFIG_USB_G_SERIAL=m +CONFIG_USB_MIDI_GADGET=m +CONFIG_USB_G_PRINTER=m +CONFIG_USB_CDC_COMPOSITE=m +CONFIG_USB_G_NOKIA=m +CONFIG_USB_G_ACM_MS=m +CONFIG_USB_G_MULTI=m +CONFIG_USB_G_MULTI_RNDIS=y +CONFIG_USB_G_MULTI_CDC=y +CONFIG_USB_G_HID=m +CONFIG_USB_G_DBGP=m +# CONFIG_USB_G_DBGP_PRINTK is not set +CONFIG_USB_G_DBGP_SERIAL=y +CONFIG_USB_G_WEBCAM=m +CONFIG_USB_RAW_GADGET=m +# end of USB Gadget precomposed configurations + +CONFIG_TYPEC=m +CONFIG_TYPEC_TCPM=m +CONFIG_TYPEC_TCPCI=m +CONFIG_TYPEC_RT1711H=m +CONFIG_TYPEC_MT6360=m +CONFIG_TYPEC_TCPCI_MAXIM=m +CONFIG_TYPEC_FUSB302=m +CONFIG_TYPEC_WCOVE=m +CONFIG_TYPEC_UCSI=m +CONFIG_UCSI_CCG=m +CONFIG_UCSI_ACPI=m +CONFIG_TYPEC_TPS6598X=m +CONFIG_TYPEC_HD3SS3220=m +CONFIG_TYPEC_STUSB160X=m + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +CONFIG_TYPEC_MUX_PI3USB30532=m +CONFIG_TYPEC_MUX_INTEL_PMC=m +# end of USB Type-C Multiplexer/DeMultiplexer Switch support + +# +# USB Type-C Alternate Mode drivers +# +CONFIG_TYPEC_DP_ALTMODE=m +CONFIG_TYPEC_NVIDIA_ALTMODE=m +# end of USB Type-C Alternate Mode drivers + +CONFIG_USB_ROLE_SWITCH=m 
+CONFIG_USB_ROLES_INTEL_XHCI=m +CONFIG_MMC=m +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_SDIO_UART=m +CONFIG_MMC_TEST=m +CONFIG_MMC_CRYPTO=y + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_IO_ACCESSORS=y +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +CONFIG_MMC_SDHCI_F_SDH30=m +CONFIG_MMC_WBSD=m +CONFIG_MMC_ALCOR=m +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_SPI=m +CONFIG_MMC_SDRICOH_CS=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +CONFIG_MMC_USDHI6ROL0=m +CONFIG_MMC_REALTEK_PCI=m +CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_CQHCI=m +CONFIG_MMC_HSQ=m +CONFIG_MMC_TOSHIBA_PCI=m +CONFIG_MMC_MTK=m +CONFIG_MMC_SDHCI_XENON=m +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +CONFIG_MS_BLOCK=m + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_MEMSTICK_REALTEK_PCI=m +CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_CLASS_FLASH=m +CONFIG_LEDS_CLASS_MULTICOLOR=m +CONFIG_LEDS_BRIGHTNESS_HW_CHANGED=y + +# +# LED drivers +# +CONFIG_LEDS_88PM860X=m +CONFIG_LEDS_APU=m +CONFIG_LEDS_AS3645A=m +CONFIG_LEDS_LM3530=m +CONFIG_LEDS_LM3532=m +CONFIG_LEDS_LM3533=m +CONFIG_LEDS_LM3642=m +CONFIG_LEDS_LM3601X=m +CONFIG_LEDS_MT6323=m +CONFIG_LEDS_PCA9532=m +CONFIG_LEDS_PCA9532_GPIO=y +CONFIG_LEDS_GPIO=m +CONFIG_LEDS_LP3944=m +CONFIG_LEDS_LP3952=m +CONFIG_LEDS_LP50XX=m +CONFIG_LEDS_LP8788=m +CONFIG_LEDS_CLEVO_MAIL=m +CONFIG_LEDS_PCA955X=m +CONFIG_LEDS_PCA955X_GPIO=y +CONFIG_LEDS_PCA963X=m +CONFIG_LEDS_WM831X_STATUS=m +CONFIG_LEDS_WM8350=m +CONFIG_LEDS_DA903X=m +CONFIG_LEDS_DA9052=m +CONFIG_LEDS_DAC124S085=m +CONFIG_LEDS_PWM=m +CONFIG_LEDS_REGULATOR=m +CONFIG_LEDS_BD2802=m +CONFIG_LEDS_INTEL_SS4200=m +CONFIG_LEDS_ADP5520=m +CONFIG_LEDS_MC13783=m +CONFIG_LEDS_TCA6507=m +CONFIG_LEDS_TLC591XX=m +CONFIG_LEDS_MAX8997=m +CONFIG_LEDS_LM355x=m +CONFIG_LEDS_MENF21BMC=m + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +CONFIG_LEDS_BLINKM=m +CONFIG_LEDS_MLXCPLD=m +CONFIG_LEDS_MLXREG=m +CONFIG_LEDS_USER=m +CONFIG_LEDS_NIC78BX=m +CONFIG_LEDS_TI_LMU_COMMON=m +CONFIG_LEDS_LM36274=m +CONFIG_LEDS_TPS6105X=m +CONFIG_LEDS_SGM3140=m + +# +# Flash and Torch LED drivers +# +CONFIG_LEDS_RT8515=m + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +CONFIG_LEDS_TRIGGER_MTD=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +CONFIG_LEDS_TRIGGER_CPU=y +CONFIG_LEDS_TRIGGER_ACTIVITY=m +CONFIG_LEDS_TRIGGER_GPIO=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +CONFIG_LEDS_TRIGGER_PANIC=y +CONFIG_LEDS_TRIGGER_NETDEV=m +CONFIG_LEDS_TRIGGER_PATTERN=m +CONFIG_LEDS_TRIGGER_AUDIO=m +CONFIG_LEDS_TRIGGER_TTY=m +CONFIG_ACCESSIBILITY=y +CONFIG_A11Y_BRAILLE_CONSOLE=y + +# +# Speakup console speech +# +CONFIG_SPEAKUP=m +CONFIG_SPEAKUP_SYNTH_ACNTSA=m +CONFIG_SPEAKUP_SYNTH_APOLLO=m +CONFIG_SPEAKUP_SYNTH_AUDPTR=m +CONFIG_SPEAKUP_SYNTH_BNS=m +CONFIG_SPEAKUP_SYNTH_DECTLK=m +CONFIG_SPEAKUP_SYNTH_DECEXT=m +CONFIG_SPEAKUP_SYNTH_LTLK=m +CONFIG_SPEAKUP_SYNTH_SOFT=m +CONFIG_SPEAKUP_SYNTH_SPKOUT=m +CONFIG_SPEAKUP_SYNTH_TXPRT=m 
+CONFIG_SPEAKUP_SYNTH_DUMMY=m +# end of Speakup console speech + +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_VIRT_DMA=y +CONFIG_INFINIBAND_MTHCA=m +CONFIG_INFINIBAND_MTHCA_DEBUG=y +CONFIG_INFINIBAND_QIB=m +CONFIG_INFINIBAND_QIB_DCA=y +CONFIG_INFINIBAND_CXGB4=m +CONFIG_INFINIBAND_EFA=m +CONFIG_INFINIBAND_I40IW=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_OCRDMA=m +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_INFINIBAND_USNIC=m +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_HFI1=m +# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set +# CONFIG_SDMA_VERBOSITY is not set +CONFIG_INFINIBAND_QEDR=m +CONFIG_INFINIBAND_RDMAVT=m +CONFIG_RDMA_RXE=m +CONFIG_RDMA_SIW=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_INFINIBAND_RTRS=m +CONFIG_INFINIBAND_RTRS_CLIENT=m +CONFIG_INFINIBAND_RTRS_SERVER=m +CONFIG_INFINIBAND_OPA_VNIC=m +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_DECODE_MCE=m +CONFIG_EDAC_GHES=y +CONFIG_EDAC_AMD64=m +CONFIG_EDAC_E752X=m +CONFIG_EDAC_I82975X=m +CONFIG_EDAC_I3000=m +CONFIG_EDAC_I3200=m +CONFIG_EDAC_IE31200=m +CONFIG_EDAC_X38=m +CONFIG_EDAC_I5400=m +CONFIG_EDAC_I7CORE=m +CONFIG_EDAC_I5000=m +CONFIG_EDAC_I5100=m +CONFIG_EDAC_I7300=m +CONFIG_EDAC_SBRIDGE=m +CONFIG_EDAC_SKX=m +CONFIG_EDAC_I10NM=m +CONFIG_EDAC_PND2=m +CONFIG_EDAC_IGEN6=m +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +CONFIG_RTC_INTF_DEV_UIE_EMUL=y +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +CONFIG_RTC_DRV_88PM860X=m +CONFIG_RTC_DRV_88PM80X=m +CONFIG_RTC_DRV_ABB5ZES3=m +CONFIG_RTC_DRV_ABEOZ9=m +CONFIG_RTC_DRV_ABX80X=m +CONFIG_RTC_DRV_DS1307=m +CONFIG_RTC_DRV_DS1307_CENTURY=y +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1374_WDT=y +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_LP8788=m +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_MAX8907=m +CONFIG_RTC_DRV_MAX8925=m +CONFIG_RTC_DRV_MAX8998=m +CONFIG_RTC_DRV_MAX8997=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF85063=m +CONFIG_RTC_DRV_PCF85363=m +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +CONFIG_RTC_DRV_PALMAS=m +CONFIG_RTC_DRV_TPS6586X=m +CONFIG_RTC_DRV_TPS65910=m +CONFIG_RTC_DRV_TPS80031=m +CONFIG_RTC_DRV_RC5T583=m +CONFIG_RTC_DRV_S35390A=m +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8010=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +CONFIG_RTC_DRV_RV3028=m +CONFIG_RTC_DRV_RV3032=m +CONFIG_RTC_DRV_RV8803=m +CONFIG_RTC_DRV_S5M=m +CONFIG_RTC_DRV_SD3078=m + +# +# SPI RTC drivers +# +CONFIG_RTC_DRV_M41T93=m +CONFIG_RTC_DRV_M41T94=m +CONFIG_RTC_DRV_DS1302=m +CONFIG_RTC_DRV_DS1305=m +CONFIG_RTC_DRV_DS1343=m +CONFIG_RTC_DRV_DS1347=m +CONFIG_RTC_DRV_DS1390=m 
+CONFIG_RTC_DRV_MAX6916=m +CONFIG_RTC_DRV_R9701=m +CONFIG_RTC_DRV_RX4581=m +CONFIG_RTC_DRV_RS5C348=m +CONFIG_RTC_DRV_MAX6902=m +CONFIG_RTC_DRV_PCF2123=m +CONFIG_RTC_DRV_MCP795=m +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +CONFIG_RTC_DRV_PCF2127=m +CONFIG_RTC_DRV_RV3029C2=m +CONFIG_RTC_DRV_RV3029_HWMON=y +CONFIG_RTC_DRV_RX6110=m + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1685_FAMILY=m +CONFIG_RTC_DRV_DS1685=y +# CONFIG_RTC_DRV_DS1689 is not set +# CONFIG_RTC_DRV_DS17285 is not set +# CONFIG_RTC_DRV_DS17485 is not set +# CONFIG_RTC_DRV_DS17885 is not set +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_DA9052=m +CONFIG_RTC_DRV_DA9055=m +CONFIG_RTC_DRV_DA9063=m +CONFIG_RTC_DRV_STK17TA8=m +CONFIG_RTC_DRV_M48T86=m +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_BQ4802=m +CONFIG_RTC_DRV_RP5C01=m +CONFIG_RTC_DRV_V3020=m +CONFIG_RTC_DRV_WM831X=m +CONFIG_RTC_DRV_WM8350=m +CONFIG_RTC_DRV_PCF50633=m +CONFIG_RTC_DRV_CROS_EC=m + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_FTRTC010=m +CONFIG_RTC_DRV_PCAP=m +CONFIG_RTC_DRV_MC13XXX=m +CONFIG_RTC_DRV_MT6397=m + +# +# HID Sensor RTC drivers +# +CONFIG_RTC_DRV_HID_SENSOR_TIME=m +# CONFIG_RTC_DRV_GOLDFISH is not set +CONFIG_RTC_DRV_WILCO_EC=m +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DMA_ACPI=y +CONFIG_ALTERA_MSGDMA=m +CONFIG_INTEL_IDMA64=m +CONFIG_INTEL_IDXD=m +CONFIG_INTEL_IDXD_SVM=y +# CONFIG_INTEL_IDXD_PERFMON is not set +CONFIG_INTEL_IOATDMA=m +CONFIG_PLX_DMA=m +CONFIG_XILINX_ZYNQMP_DPDMA=m +CONFIG_QCOM_HIDMA_MGMT=m +CONFIG_QCOM_HIDMA=m +CONFIG_DW_DMAC_CORE=y +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=y +CONFIG_DW_EDMA=m +CONFIG_DW_EDMA_PCIE=m +CONFIG_HSU_DMA=y +CONFIG_SF_PDMA=m +CONFIG_INTEL_LDMA=y + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +# CONFIG_DMATEST is not set +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +CONFIG_UDMABUF=y +# CONFIG_DMABUF_MOVE_NOTIFY is not set +# CONFIG_DMABUF_DEBUG is not set +# CONFIG_DMABUF_SELFTESTS is not set +CONFIG_DMABUF_HEAPS=y +CONFIG_DMABUF_HEAPS_SYSTEM=y +CONFIG_DMABUF_HEAPS_CMA=y +# end of DMABUF options + +CONFIG_DCA=m +CONFIG_AUXDISPLAY=y +CONFIG_CHARLCD=m +CONFIG_HD44780_COMMON=m +CONFIG_HD44780=m +CONFIG_KS0108=m +CONFIG_KS0108_PORT=0x378 +CONFIG_KS0108_DELAY=2 +CONFIG_CFAG12864B=m +CONFIG_CFAG12864B_RATE=20 +CONFIG_IMG_ASCII_LCD=m +CONFIG_LCD2S=m +CONFIG_PARPORT_PANEL=m +CONFIG_PANEL_PARPORT=0 +CONFIG_PANEL_PROFILE=5 +# CONFIG_PANEL_CHANGE_MESSAGE is not set +# CONFIG_CHARLCD_BL_OFF is not set +# CONFIG_CHARLCD_BL_ON is not set +CONFIG_CHARLCD_BL_FLASH=y +CONFIG_PANEL=m +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +CONFIG_UIO_NETX=m +CONFIG_UIO_PRUSS=m +CONFIG_UIO_MF624=m +CONFIG_UIO_HV_GENERIC=m +# CONFIG_UIO_DFL is not set +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_VIRQFD=m +CONFIG_VFIO=m +# CONFIG_VFIO_NOIOMMU is not set +CONFIG_VFIO_PCI=m +CONFIG_VFIO_PCI_VGA=y +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PCI_IGD=y +CONFIG_VFIO_MDEV=m +CONFIG_VFIO_MDEV_DEVICE=m +CONFIG_IRQ_BYPASS_MANAGER=m +CONFIG_VIRT_DRIVERS=y +CONFIG_VBOXGUEST=m +CONFIG_NITRO_ENCLAVES=m +CONFIG_ACRN_HSM=m +CONFIG_VIRTIO=y 
+CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS=y
+CONFIG_VIRTIO_PCI_LIB=m
+CONFIG_VIRTIO_MENU=y
+CONFIG_VIRTIO_PCI=m
+CONFIG_VIRTIO_PCI_LEGACY=y
+CONFIG_VIRTIO_VDPA=m
+CONFIG_VIRTIO_PMEM=m
+CONFIG_VIRTIO_BALLOON=m
+CONFIG_VIRTIO_MEM=m
+CONFIG_VIRTIO_INPUT=m
+CONFIG_VIRTIO_MMIO=m
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_VIRTIO_DMA_SHARED_BUFFER=m
+CONFIG_VDPA=m
+CONFIG_VDPA_SIM=m
+CONFIG_VDPA_SIM_NET=m
+# CONFIG_VDPA_SIM_BLOCK is not set
+CONFIG_IFCVF=m
+CONFIG_MLX5_VDPA=y
+CONFIG_MLX5_VDPA_NET=m
+# CONFIG_VP_VDPA is not set
+CONFIG_VHOST_IOTLB=m
+CONFIG_VHOST_RING=m
+CONFIG_VHOST=m
+CONFIG_VHOST_MENU=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_SCSI=m
+CONFIG_VHOST_VSOCK=m
+CONFIG_VHOST_VDPA=m
+# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+CONFIG_HYPERV=m
+CONFIG_HYPERV_TIMER=y
+CONFIG_HYPERV_UTILS=m
+CONFIG_HYPERV_BALLOON=m
+# end of Microsoft Hyper-V guest support
+
+#
+# Xen driver support
+#
+CONFIG_XEN_BALLOON=y
+CONFIG_XEN_BALLOON_MEMORY_HOTPLUG=y
+CONFIG_XEN_MEMORY_HOTPLUG_LIMIT=512
+CONFIG_XEN_SCRUB_PAGES_DEFAULT=y
+CONFIG_XEN_DEV_EVTCHN=m
+CONFIG_XEN_BACKEND=y
+CONFIG_XENFS=m
+CONFIG_XEN_COMPAT_XENFS=y
+CONFIG_XEN_SYS_HYPERVISOR=y
+CONFIG_XEN_XENBUS_FRONTEND=y
+CONFIG_XEN_GNTDEV=m
+CONFIG_XEN_GNTDEV_DMABUF=y
+CONFIG_XEN_GRANT_DEV_ALLOC=m
+CONFIG_XEN_GRANT_DMA_ALLOC=y
+CONFIG_SWIOTLB_XEN=y
+CONFIG_XEN_PCIDEV_BACKEND=m
+CONFIG_XEN_PVCALLS_FRONTEND=m
+CONFIG_XEN_PVCALLS_BACKEND=y
+CONFIG_XEN_SCSI_BACKEND=m
+CONFIG_XEN_PRIVCMD=m
+CONFIG_XEN_ACPI_PROCESSOR=m
+CONFIG_XEN_MCE_LOG=y
+CONFIG_XEN_HAVE_PVMMU=y
+CONFIG_XEN_EFI=y
+CONFIG_XEN_AUTO_XLATE=y
+CONFIG_XEN_ACPI=y
+CONFIG_XEN_SYMS=y
+CONFIG_XEN_HAVE_VPMU=y
+CONFIG_XEN_FRONT_PGDIR_SHBUF=m
+CONFIG_XEN_UNPOPULATED_ALLOC=y
+# end of Xen driver support
+
+# CONFIG_GREYBUS is not set
+# CONFIG_COMEDI is not set
+CONFIG_STAGING=y
+CONFIG_PRISM2_USB=m
+CONFIG_RTL8192U=m
+CONFIG_RTLLIB=m
+CONFIG_RTLLIB_CRYPTO_CCMP=m
+CONFIG_RTLLIB_CRYPTO_TKIP=m
+CONFIG_RTLLIB_CRYPTO_WEP=m
+CONFIG_RTL8192E=m
+CONFIG_RTL8723BS=m
+CONFIG_R8712U=m
+CONFIG_R8188EU=m
+CONFIG_88EU_AP_MODE=y
+CONFIG_RTS5208=m
+CONFIG_VT6655=m
+CONFIG_VT6656=m
+
+#
+# IIO staging drivers
+#
+
+#
+# Accelerometers
+#
+CONFIG_ADIS16203=m
+CONFIG_ADIS16240=m
+# end of Accelerometers
+
+#
+# Analog to digital converters
+#
+CONFIG_AD7816=m
+CONFIG_AD7280=m
+# end of Analog to digital converters
+
+#
+# Analog digital bi-direction converters
+#
+CONFIG_ADT7316=m
+CONFIG_ADT7316_SPI=m
+CONFIG_ADT7316_I2C=m
+# end of Analog digital bi-direction converters
+
+#
+# Capacitance to digital converters
+#
+CONFIG_AD7746=m
+# end of Capacitance to digital converters
+
+#
+# Direct Digital Synthesis
+#
+CONFIG_AD9832=m
+CONFIG_AD9834=m
+# end of Direct Digital Synthesis
+
+#
+# Network Analyzer, Impedance Converters
+#
+CONFIG_AD5933=m
+# end of Network Analyzer, Impedance Converters
+
+#
+# Active energy metering IC
+#
+CONFIG_ADE7854=m
+CONFIG_ADE7854_I2C=m
+CONFIG_ADE7854_SPI=m
+# end of Active energy metering IC
+
+#
+# Resolver to digital converters
+#
+CONFIG_AD2S1210=m
+# end of Resolver to digital converters
+# end of IIO staging drivers
+
+# CONFIG_FB_SM750 is not set
+CONFIG_STAGING_MEDIA=y
+CONFIG_INTEL_ATOMISP=y
+CONFIG_VIDEO_ATOMISP=m
+CONFIG_VIDEO_ATOMISP_ISP2401=y
+CONFIG_VIDEO_ATOMISP_OV2722=m
+CONFIG_VIDEO_ATOMISP_GC2235=m
+CONFIG_VIDEO_ATOMISP_MSRLIST_HELPER=m
+CONFIG_VIDEO_ATOMISP_MT9M114=m
+CONFIG_VIDEO_ATOMISP_GC0310=m
+CONFIG_VIDEO_ATOMISP_OV2680=m
+CONFIG_VIDEO_ATOMISP_OV5693=m
+CONFIG_VIDEO_ATOMISP_LM3554=m
+# CONFIG_VIDEO_ZORAN is not set
+CONFIG_VIDEO_IPU3_IMGU=m
+
+#
+# Android
+#
+# end of Android
+
+CONFIG_LTE_GDM724X=m
+CONFIG_FIREWIRE_SERIAL=m
+CONFIG_FWTTY_MAX_TOTAL_PORTS=64
+CONFIG_FWTTY_MAX_CARD_PORTS=32
+CONFIG_GS_FPGABOOT=m
+CONFIG_UNISYSSPAR=y
+CONFIG_UNISYS_VISORNIC=m
+CONFIG_UNISYS_VISORINPUT=m
+CONFIG_UNISYS_VISORHBA=m
+# CONFIG_FB_TFT is not set
+CONFIG_MOST_COMPONENTS=m
+CONFIG_MOST_NET=m
+CONFIG_MOST_VIDEO=m
+CONFIG_MOST_I2C=m
+CONFIG_KS7010=m
+CONFIG_PI433=m
+CONFIG_FIELDBUS_DEV=m
+CONFIG_KPC2000=y
+CONFIG_KPC2000_CORE=m
+CONFIG_KPC2000_SPI=m
+CONFIG_KPC2000_I2C=m
+CONFIG_KPC2000_DMA=m
+CONFIG_QLGE=m
+CONFIG_WFX=m
+CONFIG_X86_PLATFORM_DEVICES=y
+CONFIG_ACPI_WMI=m
+CONFIG_WMI_BMOF=m
+CONFIG_HUAWEI_WMI=m
+CONFIG_INTEL_WMI_SBL_FW_UPDATE=m
+CONFIG_INTEL_WMI_THUNDERBOLT=m
+CONFIG_MXM_WMI=m
+CONFIG_PEAQ_WMI=m
+CONFIG_XIAOMI_WMI=m
+# CONFIG_GIGABYTE_WMI is not set
+CONFIG_ACERHDF=m
+CONFIG_ACER_WIRELESS=m
+CONFIG_ACER_WMI=m
+CONFIG_AMD_PMC=m
+# CONFIG_ADV_SWBUTTON is not set
+CONFIG_APPLE_GMUX=m
+CONFIG_ASUS_LAPTOP=m
+CONFIG_ASUS_WIRELESS=m
+CONFIG_ASUS_WMI=m
+CONFIG_ASUS_NB_WMI=m
+CONFIG_EEEPC_LAPTOP=m
+CONFIG_EEEPC_WMI=m
+CONFIG_X86_PLATFORM_DRIVERS_DELL=y
+CONFIG_ALIENWARE_WMI=m
+CONFIG_DCDBAS=m
+CONFIG_DELL_LAPTOP=m
+# CONFIG_DELL_RBU is not set
+CONFIG_DELL_RBTN=m
+CONFIG_DELL_SMBIOS=m
+CONFIG_DELL_SMBIOS_WMI=y
+CONFIG_DELL_SMBIOS_SMM=y
+CONFIG_DELL_SMO8800=m
+CONFIG_DELL_WMI=m
+CONFIG_DELL_WMI_AIO=m
+CONFIG_DELL_WMI_DESCRIPTOR=m
+CONFIG_DELL_WMI_LED=m
+CONFIG_DELL_WMI_SYSMAN=m
+CONFIG_AMILO_RFKILL=m
+CONFIG_FUJITSU_LAPTOP=m
+CONFIG_FUJITSU_TABLET=m
+CONFIG_GPD_POCKET_FAN=m
+CONFIG_HP_ACCEL=m
+CONFIG_HP_WIRELESS=m
+CONFIG_HP_WMI=m
+CONFIG_IBM_RTL=m
+CONFIG_IDEAPAD_LAPTOP=m
+CONFIG_SENSORS_HDAPS=m
+CONFIG_THINKPAD_ACPI=m
+CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y
+# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set
+# CONFIG_THINKPAD_ACPI_DEBUG is not set
+# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set
+CONFIG_THINKPAD_ACPI_VIDEO=y
+CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
+CONFIG_INTEL_ATOMISP2_LED=m
+CONFIG_INTEL_CHT_INT33FE=m
+CONFIG_INTEL_HID_EVENT=m
+CONFIG_INTEL_INT0002_VGPIO=m
+CONFIG_INTEL_MENLOW=m
+CONFIG_INTEL_OAKTRAIL=m
+CONFIG_INTEL_VBTN=m
+CONFIG_MSI_LAPTOP=m
+CONFIG_MSI_WMI=m
+CONFIG_PCENGINES_APU2=m
+CONFIG_SAMSUNG_LAPTOP=m
+CONFIG_SAMSUNG_Q10=m
+CONFIG_ACPI_TOSHIBA=m
+CONFIG_TOSHIBA_BT_RFKILL=m
+CONFIG_TOSHIBA_HAPS=m
+CONFIG_TOSHIBA_WMI=m
+CONFIG_ACPI_CMPC=m
+CONFIG_COMPAL_LAPTOP=m
+CONFIG_LG_LAPTOP=m
+CONFIG_PANASONIC_LAPTOP=m
+CONFIG_SONY_LAPTOP=m
+CONFIG_SONYPI_COMPAT=y
+CONFIG_SYSTEM76_ACPI=m
+CONFIG_TOPSTAR_LAPTOP=m
+CONFIG_I2C_MULTI_INSTANTIATE=m
+CONFIG_MLX_PLATFORM=m
+CONFIG_TOUCHSCREEN_DMI=y
+CONFIG_INTEL_IPS=m
+CONFIG_INTEL_RST=m
+CONFIG_INTEL_SMARTCONNECT=m
+
+#
+# Intel Speed Select Technology interface support
+#
+CONFIG_INTEL_SPEED_SELECT_INTERFACE=m
+# end of Intel Speed Select Technology interface support
+
+CONFIG_INTEL_TURBO_MAX_3=y
+CONFIG_INTEL_UNCORE_FREQ_CONTROL=m
+CONFIG_INTEL_BXTWC_PMIC_TMU=m
+CONFIG_INTEL_CHTDC_TI_PWRBTN=m
+CONFIG_INTEL_MRFLD_PWRBTN=m
+CONFIG_INTEL_PMC_CORE=y
+CONFIG_INTEL_PMT_CLASS=m
+CONFIG_INTEL_PMT_TELEMETRY=m
+CONFIG_INTEL_PMT_CRASHLOG=m
+CONFIG_INTEL_PUNIT_IPC=m
+CONFIG_INTEL_SCU_IPC=y
+CONFIG_INTEL_SCU=y
+CONFIG_INTEL_SCU_PCI=y
+CONFIG_INTEL_SCU_PLATFORM=m
+CONFIG_INTEL_SCU_IPC_UTIL=m
+CONFIG_INTEL_TELEMETRY=m
+CONFIG_PMC_ATOM=y
+CONFIG_CHROME_PLATFORMS=y
+CONFIG_CHROMEOS_LAPTOP=m
+CONFIG_CHROMEOS_PSTORE=m
+CONFIG_CHROMEOS_TBMC=m
+CONFIG_CROS_EC=m
+CONFIG_CROS_EC_I2C=m
+CONFIG_CROS_EC_ISHTP=m
+CONFIG_CROS_EC_SPI=m
+CONFIG_CROS_EC_LPC=m
+CONFIG_CROS_EC_PROTO=y +CONFIG_CROS_KBD_LED_BACKLIGHT=m +CONFIG_CROS_EC_CHARDEV=m +CONFIG_CROS_EC_LIGHTBAR=m +CONFIG_CROS_EC_DEBUGFS=m +CONFIG_CROS_EC_SENSORHUB=m +CONFIG_CROS_EC_SYSFS=m +CONFIG_CROS_EC_TYPEC=m +CONFIG_CROS_USBPD_LOGGER=m +CONFIG_CROS_USBPD_NOTIFY=m +CONFIG_WILCO_EC=m +CONFIG_WILCO_EC_DEBUGFS=m +CONFIG_WILCO_EC_EVENTS=m +CONFIG_WILCO_EC_TELEMETRY=m +CONFIG_MELLANOX_PLATFORM=y +CONFIG_MLXREG_HOTPLUG=m +CONFIG_MLXREG_IO=m +CONFIG_SURFACE_PLATFORMS=y +CONFIG_SURFACE3_WMI=m +CONFIG_SURFACE_3_BUTTON=m +CONFIG_SURFACE_3_POWER_OPREGION=m +CONFIG_SURFACE_ACPI_NOTIFY=m +CONFIG_SURFACE_AGGREGATOR_CDEV=m +# CONFIG_SURFACE_AGGREGATOR_REGISTRY is not set +# CONFIG_SURFACE_DTX is not set +CONFIG_SURFACE_GPE=m +CONFIG_SURFACE_HOTPLUG=m +CONFIG_SURFACE_PRO3_BUTTON=m +CONFIG_SURFACE_AGGREGATOR=m +CONFIG_SURFACE_AGGREGATOR_BUS=y +# CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION is not set +CONFIG_HAVE_CLK=y +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y +CONFIG_COMMON_CLK_WM831X=m +CONFIG_COMMON_CLK_MAX9485=m +CONFIG_COMMON_CLK_SI5341=m +CONFIG_COMMON_CLK_SI5351=m +CONFIG_COMMON_CLK_SI544=m +CONFIG_COMMON_CLK_CDCE706=m +CONFIG_COMMON_CLK_CS2000_CP=m +CONFIG_COMMON_CLK_S2MPS11=m +CONFIG_CLK_TWL6040=m +CONFIG_COMMON_CLK_PALMAS=m +CONFIG_COMMON_CLK_PWM=m +CONFIG_XILINX_VCU=m +CONFIG_HWSPINLOCK=y + +# +# Clock Source drivers +# +CONFIG_CLKEVT_I8253=y +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +# end of Clock Source drivers + +CONFIG_MAILBOX=y +CONFIG_PCC=y +CONFIG_ALTERA_MBOX=m +CONFIG_IOMMU_IOVA=y +CONFIG_IOASID=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_IOMMU_DMA=y +CONFIG_AMD_IOMMU=y +CONFIG_AMD_IOMMU_V2=y +CONFIG_DMAR_TABLE=y +CONFIG_INTEL_IOMMU=y +CONFIG_INTEL_IOMMU_SVM=y +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set +CONFIG_INTEL_IOMMU_FLOPPY_WA=y +CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON=y +CONFIG_IRQ_REMAP=y +CONFIG_HYPERV_IOMMU=y + +# +# Remoteproc drivers +# +CONFIG_REMOTEPROC=y +CONFIG_REMOTEPROC_CDEV=y +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +CONFIG_RPMSG=m +CONFIG_RPMSG_CHAR=m +CONFIG_RPMSG_NS=m +CONFIG_RPMSG_QCOM_GLINK=m +CONFIG_RPMSG_QCOM_GLINK_RPM=m +CONFIG_RPMSG_VIRTIO=m +# end of Rpmsg drivers + +CONFIG_SOUNDWIRE=m + +# +# SoundWire Devices +# +CONFIG_SOUNDWIRE_CADENCE=m +CONFIG_SOUNDWIRE_INTEL=m +CONFIG_SOUNDWIRE_QCOM=m +CONFIG_SOUNDWIRE_GENERIC_ALLOCATION=m + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Broadcom SoC drivers +# +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Enable LiteX SoC Builder specific drivers +# +# end of Enable LiteX SoC Builder specific drivers + +# +# Qualcomm SoC drivers +# +CONFIG_QCOM_QMI_HELPERS=m +# end of Qualcomm SoC drivers + +CONFIG_SOC_TI=y + +# +# Xilinx SoC drivers +# +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +CONFIG_PM_DEVFREQ=y + +# +# DEVFREQ Governors +# +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=m +CONFIG_DEVFREQ_GOV_PERFORMANCE=m +CONFIG_DEVFREQ_GOV_POWERSAVE=m +CONFIG_DEVFREQ_GOV_USERSPACE=m +CONFIG_DEVFREQ_GOV_PASSIVE=m + +# +# DEVFREQ Drivers +# +CONFIG_PM_DEVFREQ_EVENT=y +CONFIG_EXTCON=y + +# +# Extcon Device Drivers +# +CONFIG_EXTCON_ADC_JACK=m 
+CONFIG_EXTCON_AXP288=m +CONFIG_EXTCON_FSA9480=m +CONFIG_EXTCON_GPIO=m +CONFIG_EXTCON_INTEL_INT3496=m +CONFIG_EXTCON_INTEL_CHT_WC=m +CONFIG_EXTCON_INTEL_MRFLD=m +CONFIG_EXTCON_MAX14577=m +CONFIG_EXTCON_MAX3355=m +CONFIG_EXTCON_MAX77693=m +CONFIG_EXTCON_MAX77843=m +CONFIG_EXTCON_MAX8997=m +CONFIG_EXTCON_PALMAS=m +CONFIG_EXTCON_PTN5150=m +CONFIG_EXTCON_RT8973A=m +CONFIG_EXTCON_SM5502=m +CONFIG_EXTCON_USB_GPIO=m +CONFIG_EXTCON_USBC_CROS_EC=m +CONFIG_EXTCON_USBC_TUSB320=m +CONFIG_MEMORY=y +CONFIG_FPGA_DFL_EMIF=m +CONFIG_IIO=m +CONFIG_IIO_BUFFER=y +CONFIG_IIO_BUFFER_CB=m +CONFIG_IIO_BUFFER_DMA=m +CONFIG_IIO_BUFFER_DMAENGINE=m +CONFIG_IIO_BUFFER_HW_CONSUMER=m +CONFIG_IIO_KFIFO_BUF=m +CONFIG_IIO_TRIGGERED_BUFFER=m +CONFIG_IIO_CONFIGFS=m +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +CONFIG_IIO_SW_DEVICE=m +CONFIG_IIO_SW_TRIGGER=m +CONFIG_IIO_TRIGGERED_EVENT=m + +# +# Accelerometers +# +CONFIG_ADIS16201=m +CONFIG_ADIS16209=m +CONFIG_ADXL372=m +CONFIG_ADXL372_SPI=m +CONFIG_ADXL372_I2C=m +CONFIG_BMA220=m +CONFIG_BMA400=m +CONFIG_BMA400_I2C=m +CONFIG_BMA400_SPI=m +CONFIG_BMC150_ACCEL=m +CONFIG_BMC150_ACCEL_I2C=m +CONFIG_BMC150_ACCEL_SPI=m +# CONFIG_BMI088_ACCEL is not set +CONFIG_DA280=m +CONFIG_DA311=m +CONFIG_DMARD09=m +CONFIG_DMARD10=m +CONFIG_HID_SENSOR_ACCEL_3D=m +CONFIG_IIO_CROS_EC_ACCEL_LEGACY=m +CONFIG_IIO_ST_ACCEL_3AXIS=m +CONFIG_IIO_ST_ACCEL_I2C_3AXIS=m +CONFIG_IIO_ST_ACCEL_SPI_3AXIS=m +CONFIG_KXSD9=m +CONFIG_KXSD9_SPI=m +CONFIG_KXSD9_I2C=m +CONFIG_KXCJK1013=m +CONFIG_MC3230=m +CONFIG_MMA7455=m +CONFIG_MMA7455_I2C=m +CONFIG_MMA7455_SPI=m +CONFIG_MMA7660=m +CONFIG_MMA8452=m +CONFIG_MMA9551_CORE=m +CONFIG_MMA9551=m +CONFIG_MMA9553=m +CONFIG_MXC4005=m +CONFIG_MXC6255=m +CONFIG_SCA3000=m +CONFIG_STK8312=m +CONFIG_STK8BA50=m +# end of Accelerometers + +# +# Analog to digital converters +# +CONFIG_AD_SIGMA_DELTA=m +CONFIG_AD7091R5=m +CONFIG_AD7124=m +CONFIG_AD7192=m +CONFIG_AD7266=m +CONFIG_AD7291=m +CONFIG_AD7292=m +CONFIG_AD7298=m +CONFIG_AD7476=m +CONFIG_AD7606=m +CONFIG_AD7606_IFACE_PARALLEL=m +CONFIG_AD7606_IFACE_SPI=m +CONFIG_AD7766=m +CONFIG_AD7768_1=m +CONFIG_AD7780=m +CONFIG_AD7791=m +CONFIG_AD7793=m +CONFIG_AD7887=m +CONFIG_AD7923=m +CONFIG_AD7949=m +CONFIG_AD799X=m +CONFIG_AXP20X_ADC=m +CONFIG_AXP288_ADC=m +CONFIG_CC10001_ADC=m +CONFIG_DA9150_GPADC=m +CONFIG_DLN2_ADC=m +CONFIG_HI8435=m +CONFIG_HX711=m +CONFIG_INA2XX_ADC=m +CONFIG_INTEL_MRFLD_ADC=m +CONFIG_LP8788_ADC=m +CONFIG_LTC2471=m +CONFIG_LTC2485=m +CONFIG_LTC2496=m +CONFIG_LTC2497=m +CONFIG_MAX1027=m +CONFIG_MAX11100=m +CONFIG_MAX1118=m +CONFIG_MAX1241=m +CONFIG_MAX1363=m +CONFIG_MAX9611=m +CONFIG_MCP320X=m +CONFIG_MCP3422=m +CONFIG_MCP3911=m +CONFIG_MEDIATEK_MT6360_ADC=m +CONFIG_MEN_Z188_ADC=m +CONFIG_MP2629_ADC=m +CONFIG_NAU7802=m +CONFIG_PALMAS_GPADC=m +CONFIG_TI_ADC081C=m +CONFIG_TI_ADC0832=m +CONFIG_TI_ADC084S021=m +CONFIG_TI_ADC12138=m +CONFIG_TI_ADC108S102=m +CONFIG_TI_ADC128S052=m +CONFIG_TI_ADC161S626=m +CONFIG_TI_ADS1015=m +CONFIG_TI_ADS7950=m +# CONFIG_TI_ADS131E08 is not set +CONFIG_TI_AM335X_ADC=m +CONFIG_TI_TLC4541=m +CONFIG_TWL4030_MADC=m +CONFIG_TWL6030_GPADC=m +CONFIG_VIPERBOARD_ADC=m +CONFIG_XILINX_XADC=m +# end of Analog to digital converters + +# +# Analog Front Ends +# +# end of Analog Front Ends + +# +# Amplifiers +# +CONFIG_AD8366=m +CONFIG_HMC425=m +# end of Amplifiers + +# +# Capacitance to digital converters +# +CONFIG_AD7150=m +# end of Capacitance to digital converters + +# +# Chemical Sensors +# +CONFIG_ATLAS_PH_SENSOR=m +CONFIG_ATLAS_EZO_SENSOR=m +CONFIG_BME680=m +CONFIG_BME680_I2C=m 
+CONFIG_BME680_SPI=m +CONFIG_CCS811=m +CONFIG_IAQCORE=m +CONFIG_PMS7003=m +CONFIG_SCD30_CORE=m +CONFIG_SCD30_I2C=m +CONFIG_SCD30_SERIAL=m +CONFIG_SENSIRION_SGP30=m +CONFIG_SPS30=m +CONFIG_VZ89X=m +# end of Chemical Sensors + +CONFIG_IIO_CROS_EC_SENSORS_CORE=m +CONFIG_IIO_CROS_EC_SENSORS=m +CONFIG_IIO_CROS_EC_SENSORS_LID_ANGLE=m + +# +# Hid Sensor IIO Common +# +CONFIG_HID_SENSOR_IIO_COMMON=m +CONFIG_HID_SENSOR_IIO_TRIGGER=m +# end of Hid Sensor IIO Common + +CONFIG_IIO_MS_SENSORS_I2C=m + +# +# IIO SCMI Sensors +# +# end of IIO SCMI Sensors + +# +# SSP Sensor Common +# +CONFIG_IIO_SSP_SENSORS_COMMONS=m +CONFIG_IIO_SSP_SENSORHUB=m +# end of SSP Sensor Common + +CONFIG_IIO_ST_SENSORS_I2C=m +CONFIG_IIO_ST_SENSORS_SPI=m +CONFIG_IIO_ST_SENSORS_CORE=m + +# +# Digital to analog converters +# +CONFIG_AD5064=m +CONFIG_AD5360=m +CONFIG_AD5380=m +CONFIG_AD5421=m +CONFIG_AD5446=m +CONFIG_AD5449=m +CONFIG_AD5592R_BASE=m +CONFIG_AD5592R=m +CONFIG_AD5593R=m +CONFIG_AD5504=m +CONFIG_AD5624R_SPI=m +CONFIG_AD5686=m +CONFIG_AD5686_SPI=m +CONFIG_AD5696_I2C=m +CONFIG_AD5755=m +CONFIG_AD5758=m +CONFIG_AD5761=m +CONFIG_AD5764=m +CONFIG_AD5766=m +CONFIG_AD5770R=m +CONFIG_AD5791=m +CONFIG_AD7303=m +CONFIG_AD8801=m +CONFIG_DS4424=m +CONFIG_LTC1660=m +CONFIG_LTC2632=m +CONFIG_M62332=m +CONFIG_MAX517=m +CONFIG_MCP4725=m +CONFIG_MCP4922=m +CONFIG_TI_DAC082S085=m +CONFIG_TI_DAC5571=m +CONFIG_TI_DAC7311=m +CONFIG_TI_DAC7612=m +# end of Digital to analog converters + +# +# IIO dummy driver +# +# CONFIG_IIO_SIMPLE_DUMMY is not set +# end of IIO dummy driver + +# +# Frequency Synthesizers DDS/PLL +# + +# +# Clock Generator/Distribution +# +CONFIG_AD9523=m +# end of Clock Generator/Distribution + +# +# Phase-Locked Loop (PLL) frequency synthesizers +# +CONFIG_ADF4350=m +CONFIG_ADF4371=m +# end of Phase-Locked Loop (PLL) frequency synthesizers +# end of Frequency Synthesizers DDS/PLL + +# +# Digital gyroscope sensors +# +CONFIG_ADIS16080=m +CONFIG_ADIS16130=m +CONFIG_ADIS16136=m +CONFIG_ADIS16260=m +CONFIG_ADXRS290=m +CONFIG_ADXRS450=m +CONFIG_BMG160=m +CONFIG_BMG160_I2C=m +CONFIG_BMG160_SPI=m +CONFIG_FXAS21002C=m +CONFIG_FXAS21002C_I2C=m +CONFIG_FXAS21002C_SPI=m +CONFIG_HID_SENSOR_GYRO_3D=m +CONFIG_MPU3050=m +CONFIG_MPU3050_I2C=m +CONFIG_IIO_ST_GYRO_3AXIS=m +CONFIG_IIO_ST_GYRO_I2C_3AXIS=m +CONFIG_IIO_ST_GYRO_SPI_3AXIS=m +CONFIG_ITG3200=m +# end of Digital gyroscope sensors + +# +# Health Sensors +# + +# +# Heart Rate Monitors +# +CONFIG_AFE4403=m +CONFIG_AFE4404=m +CONFIG_MAX30100=m +CONFIG_MAX30102=m +# end of Heart Rate Monitors +# end of Health Sensors + +# +# Humidity sensors +# +CONFIG_AM2315=m +CONFIG_DHT11=m +CONFIG_HDC100X=m +CONFIG_HDC2010=m +CONFIG_HID_SENSOR_HUMIDITY=m +CONFIG_HTS221=m +CONFIG_HTS221_I2C=m +CONFIG_HTS221_SPI=m +CONFIG_HTU21=m +CONFIG_SI7005=m +CONFIG_SI7020=m +# end of Humidity sensors + +# +# Inertial measurement units +# +CONFIG_ADIS16400=m +CONFIG_ADIS16460=m +CONFIG_ADIS16475=m +CONFIG_ADIS16480=m +CONFIG_BMI160=m +CONFIG_BMI160_I2C=m +CONFIG_BMI160_SPI=m +CONFIG_FXOS8700=m +CONFIG_FXOS8700_I2C=m +CONFIG_FXOS8700_SPI=m +CONFIG_KMX61=m +CONFIG_INV_ICM42600=m +CONFIG_INV_ICM42600_I2C=m +CONFIG_INV_ICM42600_SPI=m +CONFIG_INV_MPU6050_IIO=m +CONFIG_INV_MPU6050_I2C=m +CONFIG_INV_MPU6050_SPI=m +CONFIG_IIO_ST_LSM6DSX=m +CONFIG_IIO_ST_LSM6DSX_I2C=m +CONFIG_IIO_ST_LSM6DSX_SPI=m +# end of Inertial measurement units + +CONFIG_IIO_ADIS_LIB=m +CONFIG_IIO_ADIS_LIB_BUFFER=y + +# +# Light sensors +# +CONFIG_ACPI_ALS=m +CONFIG_ADJD_S311=m +CONFIG_ADUX1020=m +CONFIG_AL3010=m +CONFIG_AL3320A=m +CONFIG_APDS9300=m 
+CONFIG_APDS9960=m +CONFIG_AS73211=m +CONFIG_BH1750=m +CONFIG_BH1780=m +CONFIG_CM32181=m +CONFIG_CM3232=m +CONFIG_CM3323=m +CONFIG_CM36651=m +CONFIG_IIO_CROS_EC_LIGHT_PROX=m +CONFIG_GP2AP002=m +CONFIG_GP2AP020A00F=m +CONFIG_IQS621_ALS=m +CONFIG_SENSORS_ISL29018=m +CONFIG_SENSORS_ISL29028=m +CONFIG_ISL29125=m +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +CONFIG_JSA1212=m +CONFIG_RPR0521=m +CONFIG_SENSORS_LM3533=m +CONFIG_LTR501=m +CONFIG_LV0104CS=m +CONFIG_MAX44000=m +CONFIG_MAX44009=m +CONFIG_NOA1305=m +CONFIG_OPT3001=m +CONFIG_PA12203001=m +CONFIG_SI1133=m +CONFIG_SI1145=m +CONFIG_STK3310=m +CONFIG_ST_UVIS25=m +CONFIG_ST_UVIS25_I2C=m +CONFIG_ST_UVIS25_SPI=m +CONFIG_TCS3414=m +CONFIG_TCS3472=m +CONFIG_SENSORS_TSL2563=m +CONFIG_TSL2583=m +CONFIG_TSL2772=m +CONFIG_TSL4531=m +CONFIG_US5182D=m +CONFIG_VCNL4000=m +CONFIG_VCNL4035=m +CONFIG_VEML6030=m +CONFIG_VEML6070=m +CONFIG_VL6180=m +CONFIG_ZOPT2201=m +# end of Light sensors + +# +# Magnetometer sensors +# +CONFIG_AK8975=m +CONFIG_AK09911=m +CONFIG_BMC150_MAGN=m +CONFIG_BMC150_MAGN_I2C=m +CONFIG_BMC150_MAGN_SPI=m +CONFIG_MAG3110=m +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +CONFIG_MMC35240=m +CONFIG_IIO_ST_MAGN_3AXIS=m +CONFIG_IIO_ST_MAGN_I2C_3AXIS=m +CONFIG_IIO_ST_MAGN_SPI_3AXIS=m +CONFIG_SENSORS_HMC5843=m +CONFIG_SENSORS_HMC5843_I2C=m +CONFIG_SENSORS_HMC5843_SPI=m +CONFIG_SENSORS_RM3100=m +CONFIG_SENSORS_RM3100_I2C=m +CONFIG_SENSORS_RM3100_SPI=m +CONFIG_YAMAHA_YAS530=m +# end of Magnetometer sensors + +# +# Multiplexers +# +# end of Multiplexers + +# +# Inclinometer sensors +# +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +CONFIG_HID_SENSOR_DEVICE_ROTATION=m +# end of Inclinometer sensors + +# +# Triggers - standalone +# +CONFIG_IIO_HRTIMER_TRIGGER=m +CONFIG_IIO_INTERRUPT_TRIGGER=m +CONFIG_IIO_TIGHTLOOP_TRIGGER=m +CONFIG_IIO_SYSFS_TRIGGER=m +# end of Triggers - standalone + +# +# Linear and angular position sensors +# +CONFIG_IQS624_POS=m +CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE=m +# end of Linear and angular position sensors + +# +# Digital potentiometers +# +CONFIG_AD5272=m +CONFIG_DS1803=m +CONFIG_MAX5432=m +CONFIG_MAX5481=m +CONFIG_MAX5487=m +CONFIG_MCP4018=m +CONFIG_MCP4131=m +CONFIG_MCP4531=m +CONFIG_MCP41010=m +CONFIG_TPL0102=m +# end of Digital potentiometers + +# +# Digital potentiostats +# +CONFIG_LMP91000=m +# end of Digital potentiostats + +# +# Pressure sensors +# +CONFIG_ABP060MG=m +CONFIG_BMP280=m +CONFIG_BMP280_I2C=m +CONFIG_BMP280_SPI=m +CONFIG_IIO_CROS_EC_BARO=m +CONFIG_DLHL60D=m +CONFIG_DPS310=m +CONFIG_HID_SENSOR_PRESS=m +CONFIG_HP03=m +CONFIG_ICP10100=m +CONFIG_MPL115=m +CONFIG_MPL115_I2C=m +CONFIG_MPL115_SPI=m +CONFIG_MPL3115=m +CONFIG_MS5611=m +CONFIG_MS5611_I2C=m +CONFIG_MS5611_SPI=m +CONFIG_MS5637=m +CONFIG_IIO_ST_PRESS=m +CONFIG_IIO_ST_PRESS_I2C=m +CONFIG_IIO_ST_PRESS_SPI=m +CONFIG_T5403=m +CONFIG_HP206C=m +CONFIG_ZPA2326=m +CONFIG_ZPA2326_I2C=m +CONFIG_ZPA2326_SPI=m +# end of Pressure sensors + +# +# Lightning sensors +# +CONFIG_AS3935=m +# end of Lightning sensors + +# +# Proximity and distance sensors +# +# CONFIG_CROS_EC_MKBP_PROXIMITY is not set +CONFIG_ISL29501=m +CONFIG_LIDAR_LITE_V2=m +CONFIG_MB1232=m +CONFIG_PING=m +CONFIG_RFD77402=m +CONFIG_SRF04=m +CONFIG_SX9310=m +CONFIG_SX9500=m +CONFIG_SRF08=m +CONFIG_VCNL3020=m +CONFIG_VL53L0X_I2C=m +# end of Proximity and distance sensors + +# +# Resolver to digital converters +# +CONFIG_AD2S90=m +CONFIG_AD2S1200=m +# end of Resolver to digital converters + +# +# Temperature sensors +# +CONFIG_IQS620AT_TEMP=m +CONFIG_LTC2983=m +CONFIG_MAXIM_THERMOCOUPLE=m 
+CONFIG_HID_SENSOR_TEMP=m +CONFIG_MLX90614=m +CONFIG_MLX90632=m +CONFIG_TMP006=m +CONFIG_TMP007=m +CONFIG_TSYS01=m +CONFIG_TSYS02D=m +CONFIG_MAX31856=m +# end of Temperature sensors + +CONFIG_NTB=m +CONFIG_NTB_MSI=y +CONFIG_NTB_AMD=m +CONFIG_NTB_IDT=m +CONFIG_NTB_INTEL=m +CONFIG_NTB_EPF=m +CONFIG_NTB_SWITCHTEC=m +# CONFIG_NTB_PINGPONG is not set +# CONFIG_NTB_TOOL is not set +# CONFIG_NTB_PERF is not set +# CONFIG_NTB_MSI_TEST is not set +CONFIG_NTB_TRANSPORT=m +# CONFIG_VME_BUS is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_DEBUG is not set +CONFIG_PWM_CRC=y +CONFIG_PWM_CROS_EC=m +CONFIG_PWM_DWC=m +CONFIG_PWM_IQS620A=m +CONFIG_PWM_LP3943=m +CONFIG_PWM_LPSS=m +CONFIG_PWM_LPSS_PCI=m +CONFIG_PWM_LPSS_PLATFORM=m +CONFIG_PWM_PCA9685=m +CONFIG_PWM_TWL=m +CONFIG_PWM_TWL_LED=m + +# +# IRQ chip support +# +CONFIG_MADERA_IRQ=m +# end of IRQ chip support + +CONFIG_IPACK_BUS=m +CONFIG_BOARD_TPCI200=m +CONFIG_SERIAL_IPOCTAL=m +CONFIG_RESET_CONTROLLER=y +CONFIG_RESET_BRCMSTB_RESCAL=y +CONFIG_RESET_TI_SYSCON=m + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +CONFIG_USB_LGM_PHY=m +CONFIG_BCM_KONA_USB2_PHY=m +CONFIG_PHY_PXA_28NM_HSIC=m +CONFIG_PHY_PXA_28NM_USB2=m +CONFIG_PHY_CPCAP_USB=m +CONFIG_PHY_QCOM_USB_HS=m +CONFIG_PHY_QCOM_USB_HSIC=m +CONFIG_PHY_SAMSUNG_USB2=m +CONFIG_PHY_TUSB1210=m +CONFIG_PHY_INTEL_LGM_EMMC=m +# end of PHY Subsystem + +CONFIG_POWERCAP=y +CONFIG_INTEL_RAPL_CORE=m +CONFIG_INTEL_RAPL=m +CONFIG_IDLE_INJECT=y +CONFIG_DTPM=y +CONFIG_DTPM_CPU=y +CONFIG_MCB=m +CONFIG_MCB_PCI=m +CONFIG_MCB_LPC=m + +# +# Performance monitor support +# +# end of Performance monitor support + +CONFIG_RAS=y +CONFIG_RAS_CEC=y +# CONFIG_RAS_CEC_DEBUG is not set +CONFIG_USB4=m +# CONFIG_USB4_DEBUGFS_WRITE is not set +# CONFIG_USB4_DMA_TEST is not set + +# +# Android +# +# CONFIG_ANDROID is not set +# end of Android + +CONFIG_LIBNVDIMM=y +CONFIG_BLK_DEV_PMEM=m +CONFIG_ND_BLK=m +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=m +CONFIG_BTT=y +CONFIG_ND_PFN=m +CONFIG_NVDIMM_PFN=y +CONFIG_NVDIMM_DAX=y +CONFIG_DAX_DRIVER=y +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_DEV_DAX_PMEM=m +CONFIG_DEV_DAX_HMEM=m +CONFIG_DEV_DAX_HMEM_DEVICES=y +CONFIG_DEV_DAX_KMEM=m +CONFIG_DEV_DAX_PMEM_COMPAT=m +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y +CONFIG_RAVE_SP_EEPROM=m +CONFIG_NVMEM_RMEM=m + +# +# HW tracing support +# +CONFIG_STM=m +CONFIG_STM_PROTO_BASIC=m +CONFIG_STM_PROTO_SYS_T=m +# CONFIG_STM_DUMMY is not set +CONFIG_STM_SOURCE_CONSOLE=m +CONFIG_STM_SOURCE_HEARTBEAT=m +CONFIG_STM_SOURCE_FTRACE=m +CONFIG_INTEL_TH=m +CONFIG_INTEL_TH_PCI=m +CONFIG_INTEL_TH_ACPI=m +CONFIG_INTEL_TH_GTH=m +CONFIG_INTEL_TH_STH=m +CONFIG_INTEL_TH_MSU=m +CONFIG_INTEL_TH_PTI=m +# CONFIG_INTEL_TH_DEBUG is not set +# end of HW tracing support + +CONFIG_FPGA=m +CONFIG_ALTERA_PR_IP_CORE=m +CONFIG_FPGA_MGR_ALTERA_PS_SPI=m +CONFIG_FPGA_MGR_ALTERA_CVP=m +CONFIG_FPGA_MGR_XILINX_SPI=m +CONFIG_FPGA_MGR_MACHXO2_SPI=m +CONFIG_FPGA_BRIDGE=m +CONFIG_ALTERA_FREEZE_BRIDGE=m +CONFIG_XILINX_PR_DECOUPLER=m +CONFIG_FPGA_REGION=m +CONFIG_FPGA_DFL=m +CONFIG_FPGA_DFL_FME=m +CONFIG_FPGA_DFL_FME_MGR=m +CONFIG_FPGA_DFL_FME_BRIDGE=m +CONFIG_FPGA_DFL_FME_REGION=m +CONFIG_FPGA_DFL_AFU=m +CONFIG_FPGA_DFL_NIOS_INTEL_PAC_N3000=m +CONFIG_FPGA_DFL_PCI=m +CONFIG_TEE=m + +# +# TEE drivers +# +CONFIG_AMDTEE=m +# end of TEE drivers + +CONFIG_MULTIPLEXER=m + +# +# Multiplexer drivers +# +CONFIG_MUX_ADG792A=m +CONFIG_MUX_ADGS1408=m +CONFIG_MUX_GPIO=m +# end of Multiplexer drivers + +CONFIG_PM_OPP=y +CONFIG_UNISYS_VISORBUS=m +CONFIG_SIOX=m +CONFIG_SIOX_BUS_GPIO=m +CONFIG_SLIMBUS=m +CONFIG_SLIM_QCOM_CTRL=m 
+CONFIG_INTERCONNECT=y +CONFIG_COUNTER=m +# CONFIG_INTERRUPT_CNT is not set +CONFIG_MOST=m +CONFIG_MOST_USB_HDM=m +CONFIG_MOST_CDEV=m +# CONFIG_MOST_SND is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_VALIDATE_FS_PARSER=y +CONFIG_FS_IOMAP=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=m +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=m +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=m +CONFIG_REISERFS_FS=m +# CONFIG_REISERFS_CHECK is not set +CONFIG_REISERFS_PROC_INFO=y +CONFIG_REISERFS_FS_XATTR=y +CONFIG_REISERFS_FS_POSIX_ACL=y +CONFIG_REISERFS_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +# CONFIG_JFS_DEBUG is not set +CONFIG_JFS_STATISTICS=y +CONFIG_XFS_FS=m +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_XFS_RT=y +CONFIG_XFS_ONLINE_SCRUB=y +CONFIG_XFS_ONLINE_REPAIR=y +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +CONFIG_OCFS2_FS=m +CONFIG_OCFS2_FS_O2CB=m +CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m +CONFIG_OCFS2_FS_STATS=y +CONFIG_OCFS2_DEBUG_MASKLOG=y +# CONFIG_OCFS2_DEBUG_FS is not set +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +CONFIG_NILFS2_FS=m +CONFIG_F2FS_FS=m +CONFIG_F2FS_STAT_FS=y +CONFIG_F2FS_FS_XATTR=y +CONFIG_F2FS_FS_POSIX_ACL=y +CONFIG_F2FS_FS_SECURITY=y +CONFIG_F2FS_CHECK_FS=y +# CONFIG_F2FS_FAULT_INJECTION is not set +CONFIG_F2FS_FS_COMPRESSION=y +CONFIG_F2FS_FS_LZO=y +CONFIG_F2FS_FS_LZ4=y +CONFIG_F2FS_FS_LZ4HC=y +CONFIG_F2FS_FS_ZSTD=y +CONFIG_F2FS_FS_LZORLE=y +CONFIG_ZONEFS_FS=m +CONFIG_FS_DAX=y +CONFIG_FS_DAX_PMD=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +# CONFIG_MANDATORY_FILE_LOCKING is not set +CONFIG_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_ALGS=m +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y +CONFIG_FS_VERITY=y +# CONFIG_FS_VERITY_DEBUG is not set +CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=m +CONFIG_QFMT_V1=m +CONFIG_QFMT_V2=m +CONFIG_QUOTACTL=y +CONFIG_AUTOFS4_FS=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_FUSE_DAX=y +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_OVERLAY_FS_XINO_AUTO=y +CONFIG_OVERLAY_FS_METACOPY=y + +# +# Caches +# +CONFIG_NETFS_SUPPORT=m +# CONFIG_NETFS_STATS is not set +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +CONFIG_FSCACHE_OBJECT_LIST=y +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/EXFAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" 
+CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_EXFAT_FS=m +CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8" +# CONFIG_NTFS_FS is not set +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_PROC_PID_ARCH_STATUS=y +CONFIG_PROC_CPU_RESCTRL=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_TMPFS_INODE64=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +CONFIG_ORANGEFS_FS=m +# CONFIG_ADFS_FS is not set +CONFIG_AFFS_FS=m +CONFIG_ECRYPT_FS=m +# CONFIG_ECRYPT_FS_MESSAGING is not set +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +CONFIG_BEFS_FS=m +# CONFIG_BEFS_DEBUG is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_JFFS2_FS=m +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +# CONFIG_JFFS2_FS_WBUF_VERIFY is not set +CONFIG_JFFS2_SUMMARY=y +CONFIG_JFFS2_FS_XATTR=y +CONFIG_JFFS2_FS_POSIX_ACL=y +CONFIG_JFFS2_FS_SECURITY=y +# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set +CONFIG_JFFS2_ZLIB=y +CONFIG_JFFS2_RTIME=y +CONFIG_UBIFS_FS=m +# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set +CONFIG_UBIFS_FS_LZO=y +CONFIG_UBIFS_FS_ZLIB=y +CONFIG_UBIFS_FS_ZSTD=y +CONFIG_UBIFS_ATIME_SUPPORT=y +CONFIG_UBIFS_FS_XATTR=y +CONFIG_UBIFS_FS_SECURITY=y +CONFIG_UBIFS_FS_AUTHENTICATION=y +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +CONFIG_CRAMFS_MTD=y +CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +# CONFIG_SQUASHFS_DECOMP_SINGLE is not set +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_SQUASHFS_ZSTD=y +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +CONFIG_MINIX_FS=m +CONFIG_OMFS_FS=m +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +CONFIG_ROMFS_FS=m +CONFIG_ROMFS_BACKED_BY_BLOCK=y +# CONFIG_ROMFS_BACKED_BY_MTD is not set +# CONFIG_ROMFS_BACKED_BY_BOTH is not set +CONFIG_ROMFS_ON_BLOCK=y +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 +CONFIG_PSTORE_DEFLATE_COMPRESS=m +CONFIG_PSTORE_LZO_COMPRESS=m +CONFIG_PSTORE_LZ4_COMPRESS=m +CONFIG_PSTORE_LZ4HC_COMPRESS=m +# CONFIG_PSTORE_842_COMPRESS is not set +CONFIG_PSTORE_ZSTD_COMPRESS=y +CONFIG_PSTORE_COMPRESS=y +# CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT is not set +# CONFIG_PSTORE_LZO_COMPRESS_DEFAULT is not set +# CONFIG_PSTORE_LZ4_COMPRESS_DEFAULT is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS_DEFAULT is not set +CONFIG_PSTORE_ZSTD_COMPRESS_DEFAULT=y +CONFIG_PSTORE_COMPRESS_DEFAULT="zstd" +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=m +CONFIG_PSTORE_ZONE=m +CONFIG_PSTORE_BLK=m +CONFIG_PSTORE_BLK_BLKDEV="" +CONFIG_PSTORE_BLK_KMSG_SIZE=64 +CONFIG_PSTORE_BLK_MAX_REASON=2 +# CONFIG_SYSV_FS is not set +CONFIG_UFS_FS=m +# CONFIG_UFS_FS_WRITE is not set +# CONFIG_UFS_DEBUG is not set +CONFIG_EROFS_FS=m +# CONFIG_EROFS_FS_DEBUG is not set +CONFIG_EROFS_FS_XATTR=y +CONFIG_EROFS_FS_POSIX_ACL=y +CONFIG_EROFS_FS_SECURITY=y +CONFIG_EROFS_FS_ZIP=y +CONFIG_VBOXSF_FS=m 
+CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V2=m +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +# CONFIG_NFS_DISABLE_UDP_SUPPORT is not set +# CONFIG_NFS_V4_2_READ_PLUS is not set +CONFIG_NFSD=m +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +# CONFIG_NFSD_FLEXFILELAYOUT is not set +CONFIG_NFSD_V4_2_INTER_SSC=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_NFS_V4_2_SSC_HELPER=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_SUNRPC_SWAP=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES=y +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CEPH_FS_SECURITY_LABEL=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +# CONFIG_CIFS_WEAK_PW_HASH is not set +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_CIFS_SWN_UPCALL=y +# CONFIG_CIFS_SMB_DIRECT is not set +CONFIG_CIFS_FSCACHE=y +CONFIG_CODA_FS=m +CONFIG_AFS_FS=m +# CONFIG_AFS_DEBUG is not set +CONFIG_AFS_FSCACHE=y +# CONFIG_AFS_DEBUG_CURSOR is not set +CONFIG_9P_FS=m +CONFIG_9P_FSCACHE=y +CONFIG_9P_FS_POSIX_ACL=y +CONFIG_9P_FS_SECURITY=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_DLM=m +CONFIG_DLM_DEBUG=y +CONFIG_UNICODE=y +# CONFIG_UNICODE_NORMALIZATION_SELFTEST is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_REQUEST_CACHE=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=m +CONFIG_ENCRYPTED_KEYS=m +CONFIG_KEY_DH_OPERATIONS=y +CONFIG_KEY_NOTIFICATIONS=y 
+CONFIG_SECURITY_DMESG_RESTRICT=y +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_PAGE_TABLE_ISOLATION=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +# CONFIG_INTEL_TXT is not set +CONFIG_LSM_MMAP_MIN_ADDR=65536 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_HARDENED_USERCOPY_FALLBACK=y +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +# CONFIG_SECURITY_SELINUX_DISABLE is not set +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=0 +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 +CONFIG_SECURITY_SMACK=y +CONFIG_SECURITY_SMACK_BRINGUP=y +CONFIG_SECURITY_SMACK_NETFILTER=y +CONFIG_SECURITY_SMACK_APPEND_SIGNALS=y +CONFIG_SECURITY_TOMOYO=y +CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY=2048 +CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG=1024 +# CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER is not set +CONFIG_SECURITY_TOMOYO_POLICY_LOADER="/usr/bin/tomoyo-init" +CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER="/usr/lib/systemd/systemd" +# CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING is not set +CONFIG_SECURITY_APPARMOR=y +CONFIG_SECURITY_APPARMOR_HASH=y +CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y +# CONFIG_SECURITY_APPARMOR_DEBUG is not set +CONFIG_SECURITY_LOADPIN=y +CONFIG_SECURITY_LOADPIN_ENFORCE=y +CONFIG_SECURITY_YAMA=y +CONFIG_SECURITY_SAFESETID=y +CONFIG_SECURITY_LOCKDOWN_LSM=y +# CONFIG_SECURITY_LOCKDOWN_LSM_EARLY is not set +CONFIG_LOCK_DOWN_KERNEL_FORCE_NONE=y +# CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY is not set +# CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY is not set +# CONFIG_SECURITY_LANDLOCK is not set +# CONFIG_INTEGRITY is not set +# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +# CONFIG_DEFAULT_SECURITY_SMACK is not set +# CONFIG_DEFAULT_SECURITY_TOMOYO is not set +# CONFIG_DEFAULT_SECURITY_APPARMOR is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="lockdown,yama,bpf" + +# +# Kernel hardening options +# +CONFIG_GCC_PLUGIN_STRUCTLEAK=y + +# +# Memory initialization +# +# CONFIG_INIT_STACK_NONE is not set +# CONFIG_GCC_PLUGIN_STRUCTLEAK_USER is not set +# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF is not set +CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL=y +# CONFIG_GCC_PLUGIN_STRUCTLEAK_VERBOSE is not set +# CONFIG_GCC_PLUGIN_STACKLEAK is not set +CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +# end of Memory initialization +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=m +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=m +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +CONFIG_CRYPTO_GF128MUL=m +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SIMD=m 
+CONFIG_CRYPTO_ENGINE=m + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=y +CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m +# CONFIG_CRYPTO_ECDSA is not set +CONFIG_CRYPTO_ECRDSA=m +CONFIG_CRYPTO_SM2=m +CONFIG_CRYPTO_CURVE25519=m +CONFIG_CRYPTO_CURVE25519_X86=m + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_AEGIS128=m +CONFIG_CRYPTO_AEGIS128_AESNI_SSE2=m +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=m +CONFIG_CRYPTO_CFB=m +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_NHPOLY1305=m +CONFIG_CRYPTO_NHPOLY1305_SSE2=m +CONFIG_CRYPTO_NHPOLY1305_AVX2=m +CONFIG_CRYPTO_ADIANTUM=m +CONFIG_CRYPTO_ESSIV=m + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=m +CONFIG_CRYPTO_CRC32C_INTEL=m +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRC32_PCLMUL=m +CONFIG_CRYPTO_XXHASH=m +CONFIG_CRYPTO_BLAKE2B=m +CONFIG_CRYPTO_BLAKE2S=m +CONFIG_CRYPTO_BLAKE2S_X86=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m +CONFIG_CRYPTO_GHASH=m +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_POLY1305_X86_64=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA1_SSSE3=m +CONFIG_CRYPTO_SHA256_SSSE3=m +CONFIG_CRYPTO_SHA512_SSSE3=m +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=m +CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_AES_TI=m +CONFIG_CRYPTO_AES_NI_INTEL=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_BLOWFISH_X86_64=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAMELLIA_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST5_AVX_X86_64=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_CAST6_AVX_X86_64=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_DES3_EDE_X86_64=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CHACHA20_X86_64=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +CONFIG_CRYPTO_TWOFISH_X86_64=m +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m +CONFIG_CRYPTO_LZ4=y +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=y + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=m +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=m +CONFIG_CRYPTO_USER_API_HASH=m +CONFIG_CRYPTO_USER_API_SKCIPHER=m +CONFIG_CRYPTO_USER_API_RNG=m +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set +CONFIG_CRYPTO_USER_API_AEAD=m +# CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE is not set +CONFIG_CRYPTO_STATS=y +CONFIG_CRYPTO_HASH_INFO=y + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S=m +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=m 
+CONFIG_CRYPTO_LIB_BLAKE2S=m +CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11 +CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m +CONFIG_CRYPTO_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA256=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_DEV_PADLOCK=m +CONFIG_CRYPTO_DEV_PADLOCK_AES=m +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m +CONFIG_CRYPTO_DEV_ATMEL_I2C=m +CONFIG_CRYPTO_DEV_ATMEL_ECC=m +CONFIG_CRYPTO_DEV_ATMEL_SHA204A=m +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_CCP_DD=m +CONFIG_CRYPTO_DEV_SP_CCP=y +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m +CONFIG_CRYPTO_DEV_SP_PSP=y +CONFIG_CRYPTO_DEV_CCP_DEBUGFS=y +CONFIG_CRYPTO_DEV_QAT=m +CONFIG_CRYPTO_DEV_QAT_DH895xCC=m +CONFIG_CRYPTO_DEV_QAT_C3XXX=m +CONFIG_CRYPTO_DEV_QAT_C62X=m +CONFIG_CRYPTO_DEV_QAT_4XXX=m +CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m +CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m +CONFIG_CRYPTO_DEV_QAT_C62XVF=m +CONFIG_CRYPTO_DEV_NITROX=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +CONFIG_CRYPTO_DEV_CHELSIO=m +CONFIG_CRYPTO_DEV_VIRTIO=m +CONFIG_CRYPTO_DEV_SAFEXCEL=m +CONFIG_CRYPTO_DEV_AMLOGIC_GXL=m +CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG=y +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_ASYMMETRIC_TPM_KEY_SUBTYPE=m +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS8_PRIVATE_KEY_PARSER=m +CONFIG_TPM_KEY_PARSER=m +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +# CONFIG_SYSTEM_REVOCATION_LIST is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +# CONFIG_RAID6_PQ_BENCHMARK is not set +CONFIG_LINEAR_RANGES=y +CONFIG_PACKING=y +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_CORDIC=m +# CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_ARCH_USE_SYM_ANNOTATIONS=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=m +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +CONFIG_CRC4=m +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +CONFIG_CRC8=m +CONFIG_XXHASH=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_842_COMPRESS=m +CONFIG_842_DECOMPRESS=m +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=y +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y 
+CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=m +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_DMA_OPS=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y +CONFIG_SWIOTLB=y +CONFIG_DMA_COHERENT_POOL=y +CONFIG_DMA_CMA=y +# CONFIG_DMA_PERNUMA_CMA is not set + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=0 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_DMA_MAP_BENCHMARK is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_LRU_CACHE=m +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_DIMLIB=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_FONT_SUPPORT=y +CONFIG_FONTS=y +# CONFIG_FONT_8x8 is not set +CONFIG_FONT_8x16=y +# CONFIG_FONT_6x11 is not set +# CONFIG_FONT_7x14 is not set +# CONFIG_FONT_PEARL_8x8 is not set +# CONFIG_FONT_ACORN_8x8 is not set +# CONFIG_FONT_MINI_4x6 is not set +# CONFIG_FONT_6x10 is not set +# CONFIG_FONT_10x18 is not set +# CONFIG_FONT_SUN8x16 is not set +# CONFIG_FONT_SUN12x22 is not set +CONFIG_FONT_TER16x32=y +# CONFIG_FONT_6x8 is not set +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_MEMREGION=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_COPY_MC=y +CONFIG_ARCH_STACKWALK=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +CONFIG_OBJAGG=m +# CONFIG_STRING_SELFTEST is not set +# end of Library routines + +CONFIG_PLDMFW=y +CONFIG_ASN1_ENCODER=m + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=4 +CONFIG_CONSOLE_LOGLEVEL_QUIET=1 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_COMPRESSED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set +CONFIG_DEBUG_INFO_BTF=y +CONFIG_PAHOLE_HAS_SPLIT_BTF=y +CONFIG_DEBUG_INFO_BTF_MODULES=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_STACK_VALIDATION=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x0 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is 
not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_HAVE_ARCH_KCSAN=y +# end of Generic Kernel Debugging Instruments + +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_MISC is not set + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_PAGE_POISONING=y +# CONFIG_DEBUG_PAGE_REF is not set +CONFIG_DEBUG_RODATA_TEST=y +CONFIG_ARCH_HAS_DEBUG_WX=y +CONFIG_DEBUG_WX=y +CONFIG_GENERIC_PTDUMP=y +CONFIG_PTDUMP_CORE=y +# CONFIG_PTDUMP_DEBUGFS is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +CONFIG_SCHED_STACK_END_CHECK=y +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_VM_PGTABLE is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP=y +# CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +# CONFIG_KASAN is not set +CONFIG_HAVE_ARCH_KFENCE=y +CONFIG_KFENCE=y +CONFIG_KFENCE_STATIC_KEYS=y +CONFIG_KFENCE_SAMPLE_INTERVAL=100 +CONFIG_KFENCE_NUM_OBJECTS=255 +CONFIG_KFENCE_STRESS_TEST_FAULTS=0 +# end of Memory Debugging + +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Oops, Lockups and Hangs +# +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set +# CONFIG_DEBUG_PREEMPT is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) 
+ +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# end of Debug kernel data structures + +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_SCALE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_RCU_STRICT_GRACE_PERIOD is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +CONFIG_LATENCYTOP=y +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_OBJTOOL_MCOUNT=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +CONFIG_BOOTTIME_TRACING=y +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_STACK_TRACER=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_MMIOTRACE=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_BPF_KPROBE_OVERRIDE=y +CONFIG_FTRACE_MCOUNT_RECORD=y +CONFIG_FTRACE_MCOUNT_USE_CC=y +CONFIG_TRACING_MAP=y +CONFIG_SYNTH_EVENTS=y +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_MMIOTRACE_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_SYNTH_EVENT_GEN_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_HIST_TRIGGERS_DEBUG is not set +# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set +# CONFIG_SAMPLES is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +CONFIG_IO_STRICT_DEVMEM=y + +# +# x86 Debugging +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y +CONFIG_EARLY_PRINTK_USB=y +# CONFIG_X86_VERBOSE_BOOTUP is not set +CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y +CONFIG_EARLY_PRINTK_USB_XDBC=y +# CONFIG_EFI_PGT_DUMP is not set +# CONFIG_DEBUG_TLBFLUSH is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +CONFIG_X86_DECODER_SELFTEST=y 
+CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEBUG_BOOT_PARAMS=y +# CONFIG_CPA_DEBUG is not set +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +# CONFIG_X86_DEBUG_FPU is not set +# CONFIG_PUNIT_ATOM_DEBUG is not set +CONFIG_UNWINDER_ORC=y +# CONFIG_UNWINDER_FRAME_POINTER is not set +# end of x86 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_MIN_HEAP is not set +# CONFIG_TEST_SORT is not set +# CONFIG_TEST_DIV64 is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_STRSCPY is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_PARMAN is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_BITOPS is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_OBJAGG is not set +# CONFIG_TEST_STACKINIT is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_HMM is not set +# CONFIG_TEST_FREE_PAGES is not set +# CONFIG_TEST_FPU is not set +CONFIG_ARCH_USE_MEMTEST=y +# CONFIG_MEMTEST is not set +# CONFIG_HYPERV_TESTING is not set +# end of Kernel Testing and Coverage +# end of Kernel hacking diff --git a/linux-tkg-config/prepare b/linux-tkg-config/prepare index b37a5fb..df0ca0a 100644 --- a/linux-tkg-config/prepare +++ b/linux-tkg-config/prepare @@ -7,9 +7,12 @@ ver59=16 ver510=35 ver511=19 ver512=2 +ver513=rc1 _cpuschedselector() { - msg2 "Which CPU sched variant do you want to build/install? Select \"CFS\" (linux kernel's default) if unsure." + msg2 "Which CPU sched variant do you want to build/install?" + msg2 "Project C / BMQ is usually a good balance for gaming." + msg2 "Select \"CFS\" (linux kernel's default) if unsure." select CPUSCHED in "${_CPUSCHEDARRAY[@]}" do case $CPUSCHED in @@ -60,7 +63,7 @@ _tkg_initscript() { # to the rest of the script if [ -z "$_version" ] && [ ! -e "$_path"/versel ]; then msg2 "Which kernel version do you want to install?" - plain "1. 5.4.$ver54 LTS\n 2. 5.7.$ver57\n 3. 5.8.$ver58\n 4. 5.9.$ver59\n 5. 5.10.$ver510 LTS\n 6. 5.11.$ver511\n > 7. 5.12.$ver512 (latest stable)" + plain "1. 5.4.$ver54 LTS\n 2. 5.7.$ver57\n 3. 5.8.$ver58\n 4. 5.9.$ver59\n 5. 5.10.$ver510 LTS\n 6. 5.11.$ver511\n > 7. 5.12.$ver512 (latest stable)\n 8. 
5.13 $ver513" read -rp "`echo $' choice[1-7?]'`" _VERSEL; case $_VERSEL in "1") @@ -93,6 +96,11 @@ _tkg_initscript() { echo "_basekernel=5.11" >> "$_path"/versel echo "_sub=${ver511}" >> "$_path"/versel ;; + "8") + echo "_basever=513" > "$_path"/versel + echo "_basekernel=5.13" >> "$_path"/versel + echo "_sub=${ver513}" >> "$_path"/versel + ;; *) echo "_basever=512" > "$_path"/versel echo "_basekernel=5.12" >> "$_path"/versel @@ -136,6 +144,11 @@ _tkg_initscript() { echo "_basekernel=5.12" >> "$_path"/versel echo "_sub=${ver512}" >> "$_path"/versel ;; + "5.13") + echo "_basever=513" > "$_path"/versel + echo "_basekernel=5.13" >> "$_path"/versel + echo "_sub=${ver513}" >> "$_path"/versel + ;; *) error "There is something wrong with your kernel version selection, exiting..." exit 1 @@ -192,6 +205,9 @@ _tkg_initscript() { elif [ "$_basever" = "512" ]; then _CPUSCHEDARRAY=("Project C / PDS" "Project C / BMQ" "MuQSS" "CFS") _CPUSCHEDVARARRAY=("pds" "bmq" "muqss" "MuQSS" "cfs") + elif [ "$_basever" = "513" ]; then + _CPUSCHEDARRAY=("Project C / PDS" "Project C / BMQ" "CFS") + _CPUSCHEDVARARRAY=("pds" "bmq" "cfs") else _CPUSCHEDARRAY=("CFS") _CPUSCHEDVARARRAY=("cfs") @@ -381,6 +397,8 @@ _tkg_srcprep() { cd ${wrksrc}/linux-${_kern_ver} fi + if [ -z $_debug ]; then + # graysky's cpu opts - https://github.com/graysky2/kernel_gcc_patch if [ "${_distro}" = "Arch" ]; then tkgpatch="$srcdir/more-uarches-for-kernel-${opt_ver}.patch" @@ -435,6 +453,8 @@ _tkg_srcprep() { rev=2 elif [ "$_basever" = "511" ]; then rev=3 + elif [ "$_basever" = "512" ]; then + rev=1 else rev=0 fi @@ -500,6 +520,8 @@ _tkg_srcprep() { tkgpatch="$srcdir/0003-glitched-cfs.patch" && _tkg_patcher fi + fi + if [ "$_distro" = "Void" ] && [[ "$_sub" = rc* ]]; then cd ${wrksrc}/linux-${_rc_kern_ver} elif [ "$_distro" = "Void" ] && [[ "$_sub" != rc* ]]; then @@ -525,6 +547,8 @@ _tkg_srcprep() { cat "${srcdir}/${_configfile}" > ./.config fi + if [ -z $_debug ]; then + # Set some -tkg defaults echo "# CONFIG_DYNAMIC_FAULT is not set" >> ./.config sed -i -e 's/CONFIG_DEFAULT_FQ_CODEL=y/# CONFIG_DEFAULT_FQ_CODEL is not set/' ./.config @@ -1250,6 +1274,8 @@ CONFIG_DEBUG_INFO_BTF_MODULES=y\n fi fi + fi + # Community patches if [ -n "$_community_patches" ]; then if [ ! 
-d "$_where/../community-patches" ]; then diff --git a/linux-tkg-patches/5.13/0001-add-sysctl-to-disallow-unprivileged-CLONE_NEWUSER-by.patch b/linux-tkg-patches/5.13/0001-add-sysctl-to-disallow-unprivileged-CLONE_NEWUSER-by.patch new file mode 100644 index 0000000..83240cb --- /dev/null +++ b/linux-tkg-patches/5.13/0001-add-sysctl-to-disallow-unprivileged-CLONE_NEWUSER-by.patch @@ -0,0 +1,156 @@ +From 5ec2dd3a095442ec1a21d86042a4994f2ba24e63 Mon Sep 17 00:00:00 2001 +Message-Id: <5ec2dd3a095442ec1a21d86042a4994f2ba24e63.1512651251.git.jan.steffens@gmail.com> +From: Serge Hallyn +Date: Fri, 31 May 2013 19:12:12 +0100 +Subject: [PATCH] add sysctl to disallow unprivileged CLONE_NEWUSER by default + +Signed-off-by: Serge Hallyn +[bwh: Remove unneeded binary sysctl bits] +Signed-off-by: Daniel Micay +--- + kernel/fork.c | 15 +++++++++++++++ + kernel/sysctl.c | 12 ++++++++++++ + kernel/user_namespace.c | 3 +++ + 3 files changed, 30 insertions(+) + +diff --git a/kernel/fork.c b/kernel/fork.c +index 07cc743698d3668e..4011d68a8ff9305c 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -102,6 +102,11 @@ + + #define CREATE_TRACE_POINTS + #include ++#ifdef CONFIG_USER_NS ++extern int unprivileged_userns_clone; ++#else ++#define unprivileged_userns_clone 0 ++#endif + + /* + * Minimum number of threads to boot the kernel +@@ -1555,6 +1560,10 @@ static __latent_entropy struct task_struct *copy_process( + if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) + return ERR_PTR(-EINVAL); + ++ if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) ++ if (!capable(CAP_SYS_ADMIN)) ++ return ERR_PTR(-EPERM); ++ + /* + * Thread groups must share signals as well, and detached threads + * can only be started up within the thread group. +@@ -2348,6 +2357,12 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) + if (unshare_flags & CLONE_NEWNS) + unshare_flags |= CLONE_FS; + ++ if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) { ++ err = -EPERM; ++ if (!capable(CAP_SYS_ADMIN)) ++ goto bad_unshare_out; ++ } ++ + err = check_unshare_flags(unshare_flags); + if (err) + goto bad_unshare_out; +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index b86520ed3fb60fbf..f7dab3760839f1a1 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -105,6 +105,9 @@ extern int core_uses_pid; + + #if defined(CONFIG_SYSCTL) + ++#ifdef CONFIG_USER_NS ++extern int unprivileged_userns_clone; ++#endif + /* Constants used for minimum and maximum */ + #ifdef CONFIG_LOCKUP_DETECTOR + static int sixty = 60; +@@ -513,6 +516,15 @@ static struct ctl_table kern_table[] = { + .proc_handler = proc_dointvec, + }, + #endif ++#ifdef CONFIG_USER_NS ++ { ++ .procname = "unprivileged_userns_clone", ++ .data = &unprivileged_userns_clone, ++ .maxlen = sizeof(int), ++ .mode = 0644, ++ .proc_handler = proc_dointvec, ++ }, ++#endif + #ifdef CONFIG_PROC_SYSCTL + { + .procname = "tainted", +diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c +index c490f1e4313b998a..dd03bd39d7bf194d 100644 +--- a/kernel/user_namespace.c ++++ b/kernel/user_namespace.c +@@ -24,6 +24,9 @@ + #include + #include + ++/* sysctl */ ++int unprivileged_userns_clone; ++ + static struct kmem_cache *user_ns_cachep __read_mostly; + static DEFINE_MUTEX(userns_state_mutex); + +-- +2.15.1 + +From b5202296055dd333db4425120d3f93ef4e6a0573 Mon Sep 17 00:00:00 2001 +From: "Jan Alexander Steffens (heftig)" +Date: Thu, 7 Dec 2017 13:50:48 +0100 +Subject: ZEN: Add CONFIG for unprivileged_userns_clone + +This way our default behavior 
continues to match the vanilla kernel. +--- + init/Kconfig | 16 ++++++++++++++++ + kernel/user_namespace.c | 4 ++++ + 2 files changed, 20 insertions(+) + +diff --git a/init/Kconfig b/init/Kconfig +index 4592bf7997c0..f3df02990aff 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1004,6 +1004,22 @@ config USER_NS + + If unsure, say N. + ++config USER_NS_UNPRIVILEGED ++ bool "Allow unprivileged users to create namespaces" ++ default y ++ depends on USER_NS ++ help ++ When disabled, unprivileged users will not be able to create ++ new namespaces. Allowing users to create their own namespaces ++ has been part of several recent local privilege escalation ++ exploits, so if you need user namespaces but are ++ paranoid^Wsecurity-conscious you want to disable this. ++ ++ This setting can be overridden at runtime via the ++ kernel.unprivileged_userns_clone sysctl. ++ ++ If unsure, say Y. ++ + config PID_NS + bool "PID Namespaces" + default y +diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c +index 6b9dbc257e34..107b17f0d528 100644 +--- a/kernel/user_namespace.c ++++ b/kernel/user_namespace.c +@@ -27,7 +27,11 @@ + #include + + /* sysctl */ ++#ifdef CONFIG_USER_NS_UNPRIVILEGED ++int unprivileged_userns_clone = 1; ++#else + int unprivileged_userns_clone; ++#endif + + static struct kmem_cache *user_ns_cachep __read_mostly; + static DEFINE_MUTEX(userns_state_mutex); diff --git a/linux-tkg-patches/5.13/0001-mm-Support-soft-dirty-flag-reset-for-VA-range.patch b/linux-tkg-patches/5.13/0001-mm-Support-soft-dirty-flag-reset-for-VA-range.patch new file mode 100644 index 0000000..ac03fd1 --- /dev/null +++ b/linux-tkg-patches/5.13/0001-mm-Support-soft-dirty-flag-reset-for-VA-range.patch @@ -0,0 +1,244 @@ +From 5ae86c8436b83762bc6cf46bea1da6ace2d3f50e Mon Sep 17 00:00:00 2001 +From: Paul Gofman +Date: Wed, 6 May 2020 14:37:44 +0300 +Subject: [PATCH 1/2] mm: Support soft dirty flag reset for VA range. + +--- + fs/proc/task_mmu.c | 129 ++++++++++++++++++++++++++++++++++++--------- + 1 file changed, 103 insertions(+), 26 deletions(-) + +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index 3cec6fbef725..7c7865028f10 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -1032,6 +1032,8 @@ enum clear_refs_types { + + struct clear_refs_private { + enum clear_refs_types type; ++ unsigned long start, end; ++ bool clear_range; + }; + + #ifdef CONFIG_MEM_SOFT_DIRTY +@@ -1125,6 +1127,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, + spinlock_t *ptl; + struct page *page; + ++ BUG_ON(addr < cp->start || end > cp->end); ++ + ptl = pmd_trans_huge_lock(pmd, vma); + if (ptl) { + if (cp->type == CLEAR_REFS_SOFT_DIRTY) { +@@ -1181,9 +1185,11 @@ static int clear_refs_test_walk(unsigned long start, unsigned long end, + struct clear_refs_private *cp = walk->private; + struct vm_area_struct *vma = walk->vma; + +- if (vma->vm_flags & VM_PFNMAP) ++ if (!cp->clear_range && (vma->vm_flags & VM_PFNMAP)) + return 1; + ++ BUG_ON(start < cp->start || end > cp->end); ++ + /* + * Writing 1 to /proc/pid/clear_refs affects all pages. + * Writing 2 to /proc/pid/clear_refs only affects anonymous pages. 
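A minimal userspace sketch of the range reset that the clear_refs_write() hunk below introduces: the caller writes the new '6' selector followed by two native unsigned longs (start and end, 17 bytes in total) to /proc/<pid>/clear_refs. This is an illustration inferred from the hunk, not part of the patch itself.

/* Illustration only: drive the soft-dirty VA-range reset added below. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

static int clear_soft_dirty_range(pid_t pid, unsigned long start, unsigned long end)
{
    char path[64], buf[17];
    ssize_t n;
    int fd;

    snprintf(path, sizeof(path), "/proc/%d/clear_refs", (int)pid);
    fd = open(path, O_WRONLY);
    if (fd < 0)
        return -1;

    buf[0] = '6';                       /* new range-reset command */
    memcpy(buf + 1, &start, sizeof(start));
    memcpy(buf + 9, &end, sizeof(end));

    n = write(fd, buf, sizeof(buf));    /* the kernel expects exactly 17 bytes */
    close(fd);
    return n == (ssize_t)sizeof(buf) ? 0 : -1;
}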
+@@ -1206,10 +1212,12 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) + { + struct task_struct *task; +- char buffer[PROC_NUMBUF]; ++ char buffer[18]; + struct mm_struct *mm; + struct vm_area_struct *vma; + enum clear_refs_types type; ++ unsigned long start, end; ++ bool clear_range; + int itype; + int rv; + +@@ -1218,12 +1226,34 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) + return -EFAULT; +- rv = kstrtoint(strstrip(buffer), 10, &itype); +- if (rv < 0) +- return rv; +- type = (enum clear_refs_types)itype; +- if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST) +- return -EINVAL; ++ ++ if (buffer[0] == '6') ++ { ++ static int once; ++ ++ if (!once++) ++ printk(KERN_DEBUG "task_mmu: Using POC clear refs range implementation.\n"); ++ ++ if (count != 17) ++ return -EINVAL; ++ ++ type = CLEAR_REFS_SOFT_DIRTY; ++ start = *(unsigned long *)(buffer + 1); ++ end = *(unsigned long *)(buffer + 1 + 8); ++ } ++ else ++ { ++ rv = kstrtoint(strstrip(buffer), 10, &itype); ++ if (rv < 0) ++ return rv; ++ type = (enum clear_refs_types)itype; ++ ++ if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST) ++ return -EINVAL; ++ ++ start = 0; ++ end = -1UL; ++ } + + task = get_proc_task(file_inode(file)); + if (!task) +@@ -1235,41 +1265,87 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, + .type = type, + }; + +- if (mmap_write_lock_killable(mm)) { +- count = -EINTR; +- goto out_mm; ++ if (start || end != -1UL) ++ { ++ start = min(start, mm->highest_vm_end) & PAGE_MASK; ++ end = min(end, mm->highest_vm_end) & PAGE_MASK; ++ ++ if (start >= end) ++ { ++ count = -EINVAL; ++ goto out_mm; ++ } ++ clear_range = true; + } ++ else ++ { ++ clear_range = false; ++ } ++ ++ cp.start = start; ++ cp.end = end; ++ cp.clear_range = clear_range; ++ + if (type == CLEAR_REFS_MM_HIWATER_RSS) { ++ if (mmap_write_lock_killable(mm)) { ++ count = -EINTR; ++ goto out_mm; ++ } ++ + /* + * Writing 5 to /proc/pid/clear_refs resets the peak + * resident set size to this mm's current rss value. + */ + reset_mm_hiwater_rss(mm); +- goto out_unlock; ++ mmap_write_unlock(mm); ++ goto out_mm; + } + + if (type == CLEAR_REFS_SOFT_DIRTY) { +- for (vma = mm->mmap; vma; vma = vma->vm_next) { +- if (!(vma->vm_flags & VM_SOFTDIRTY)) +- continue; +- vma->vm_flags &= ~VM_SOFTDIRTY; +- vma_set_page_prot(vma); ++ if (mmap_read_lock_killable(mm)) { ++ count = -EINTR; ++ goto out_mm; + } +- ++ if (!clear_range) ++ for (vma = mm->mmap; vma; vma = vma->vm_next) { ++ if (!(vma->vm_flags & VM_SOFTDIRTY)) ++ continue; ++ mmap_read_unlock(mm); ++ if (mmap_write_lock_killable(mm)) { ++ count = -EINTR; ++ goto out_mm; ++ } ++ for (vma = mm->mmap; vma; vma = vma->vm_next) { ++ vma->vm_flags &= ~VM_SOFTDIRTY; ++ vma_set_page_prot(vma); ++ } ++ mmap_write_downgrade(mm); ++ break; ++ } + inc_tlb_flush_pending(mm); + mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY, +- 0, NULL, mm, 0, -1UL); ++ 0, NULL, mm, start, end); + mmu_notifier_invalidate_range_start(&range); + } +- walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops, ++ else ++ { ++ if (mmap_write_lock_killable(mm)) { ++ count = -EINTR; ++ goto out_mm; ++ } ++ } ++ walk_page_range(mm, start, end == -1UL ? 
mm->highest_vm_end : end, &clear_refs_walk_ops, + &cp); + if (type == CLEAR_REFS_SOFT_DIRTY) { + mmu_notifier_invalidate_range_end(&range); + flush_tlb_mm(mm); + dec_tlb_flush_pending(mm); ++ mmap_read_unlock(mm); ++ } ++ else ++ { ++ mmap_write_unlock(mm); + } +-out_unlock: +- mmap_write_unlock(mm); + out_mm: + mmput(mm); + } +@@ -1301,6 +1377,7 @@ struct pagemapread { + #define PM_PFRAME_MASK GENMASK_ULL(PM_PFRAME_BITS - 1, 0) + #define PM_SOFT_DIRTY BIT_ULL(55) + #define PM_MMAP_EXCLUSIVE BIT_ULL(56) ++#define PM_SOFT_DIRTY_PAGE BIT_ULL(57) + #define PM_FILE BIT_ULL(61) + #define PM_SWAP BIT_ULL(62) + #define PM_PRESENT BIT_ULL(63) +@@ -1373,11 +1450,11 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, + flags |= PM_PRESENT; + page = vm_normal_page(vma, addr, pte); + if (pte_soft_dirty(pte)) +- flags |= PM_SOFT_DIRTY; ++ flags |= PM_SOFT_DIRTY | PM_SOFT_DIRTY_PAGE; + } else if (is_swap_pte(pte)) { + swp_entry_t entry; + if (pte_swp_soft_dirty(pte)) +- flags |= PM_SOFT_DIRTY; ++ flags |= PM_SOFT_DIRTY | PM_SOFT_DIRTY_PAGE; + entry = pte_to_swp_entry(pte); + if (pm->show_pfn) + frame = swp_type(entry) | +@@ -1424,7 +1501,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, + + flags |= PM_PRESENT; + if (pmd_soft_dirty(pmd)) +- flags |= PM_SOFT_DIRTY; ++ flags |= PM_SOFT_DIRTY | PM_SOFT_DIRTY_PAGE; + if (pm->show_pfn) + frame = pmd_pfn(pmd) + + ((addr & ~PMD_MASK) >> PAGE_SHIFT); +@@ -1442,7 +1519,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, + } + flags |= PM_SWAP; + if (pmd_swp_soft_dirty(pmd)) +- flags |= PM_SOFT_DIRTY; ++ flags |= PM_SOFT_DIRTY | PM_SOFT_DIRTY_PAGE; + VM_BUG_ON(!is_pmd_migration_entry(pmd)); + page = migration_entry_to_page(entry); + } +-- +2.30.2 + diff --git a/linux-tkg-patches/5.13/0002-clear-patches.patch b/linux-tkg-patches/5.13/0002-clear-patches.patch new file mode 100644 index 0000000..22a32f5 --- /dev/null +++ b/linux-tkg-patches/5.13/0002-clear-patches.patch @@ -0,0 +1,360 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Arjan van de Ven +Date: Mon, 14 Mar 2016 11:10:58 -0600 +Subject: [PATCH] pci pme wakeups + +Reduce wakeups for PME checks, which are a workaround for miswired +boards (sadly, too many of them) in laptops. 
+--- + drivers/pci/pci.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index c9338f9..6974fbf 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -62,7 +62,7 @@ struct pci_pme_device { + struct pci_dev *dev; + }; + +-#define PME_TIMEOUT 1000 /* How long between PME checks */ ++#define PME_TIMEOUT 4000 /* How long between PME checks */ + + static void pci_dev_d3_sleep(struct pci_dev *dev) + { +-- +https://clearlinux.org + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Arjan van de Ven +Date: Sat, 19 Mar 2016 21:32:19 -0400 +Subject: [PATCH] intel_idle: tweak cpuidle cstates + +Increase target_residency in cpuidle cstate + +Tune intel_idle to be a bit less agressive; +Clear linux is cleaner in hygiene (wakupes) than the average linux, +so we can afford changing these in a way that increases +performance while keeping power efficiency +--- + drivers/idle/intel_idle.c | 44 +++++++++++++++++++-------------------- + 1 file changed, 22 insertions(+), 22 deletions(-) + +diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c +index f449584..c994d24 100644 +--- a/drivers/idle/intel_idle.c ++++ b/drivers/idle/intel_idle.c +@@ -531,7 +531,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = { + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 10, +- .target_residency = 20, ++ .target_residency = 120, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -539,7 +539,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = { + .desc = "MWAIT 0x10", + .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 33, +- .target_residency = 100, ++ .target_residency = 900, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -547,7 +547,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = { + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 133, +- .target_residency = 400, ++ .target_residency = 1000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -555,7 +555,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = { + .desc = "MWAIT 0x32", + .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 166, +- .target_residency = 500, ++ .target_residency = 1500, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -563,7 +563,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = { + .desc = "MWAIT 0x40", + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 300, +- .target_residency = 900, ++ .target_residency = 2000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -571,7 +571,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = { + .desc = "MWAIT 0x50", + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 600, +- .target_residency = 1800, ++ .target_residency = 5000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -579,7 +579,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = { + .desc = "MWAIT 0x60", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 2600, +- .target_residency = 7700, ++ .target_residency = 9000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -599,7 +599,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = { + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + 
.exit_latency = 10, +- .target_residency = 20, ++ .target_residency = 120, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -607,7 +607,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = { + .desc = "MWAIT 0x10", + .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 40, +- .target_residency = 100, ++ .target_residency = 1000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -615,7 +615,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = { + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 133, +- .target_residency = 400, ++ .target_residency = 1000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -623,7 +623,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = { + .desc = "MWAIT 0x32", + .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 166, +- .target_residency = 500, ++ .target_residency = 2000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -631,7 +631,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = { + .desc = "MWAIT 0x40", + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 300, +- .target_residency = 900, ++ .target_residency = 4000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -639,7 +639,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = { + .desc = "MWAIT 0x50", + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 600, +- .target_residency = 1800, ++ .target_residency = 7000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -647,7 +647,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = { + .desc = "MWAIT 0x60", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 2600, +- .target_residency = 7700, ++ .target_residency = 9000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -668,7 +668,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 10, +- .target_residency = 20, ++ .target_residency = 120, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -676,7 +676,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { + .desc = "MWAIT 0x10", + .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 70, +- .target_residency = 100, ++ .target_residency = 1000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -684,7 +684,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 85, +- .target_residency = 200, ++ .target_residency = 600, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -692,7 +692,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { + .desc = "MWAIT 0x33", + .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 124, +- .target_residency = 800, ++ .target_residency = 3000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -700,7 +700,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { + .desc = "MWAIT 0x40", + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 200, +- .target_residency = 800, ++ .target_residency = 3200, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -708,7 +708,7 @@ static struct cpuidle_state 
skl_cstates[] __initdata = { + .desc = "MWAIT 0x50", + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 480, +- .target_residency = 5000, ++ .target_residency = 9000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -716,7 +716,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { + .desc = "MWAIT 0x60", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 890, +- .target_residency = 5000, ++ .target_residency = 9000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -737,7 +737,7 @@ static struct cpuidle_state skx_cstates[] __initdata = { + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 10, +- .target_residency = 20, ++ .target_residency = 300, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +-- +https://clearlinux.org + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Arjan van de Ven +Date: Fri, 6 Jan 2017 15:34:09 +0000 +Subject: [PATCH] ipv4/tcp: allow the memory tuning for tcp to go a little + bigger than default + +--- + net/ipv4/tcp.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index 30c1142..4345075 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -4201,8 +4201,8 @@ void __init tcp_init(void) + tcp_init_mem(); + /* Set per-socket limits to no more than 1/128 the pressure threshold */ + limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); +- max_wshare = min(4UL*1024*1024, limit); +- max_rshare = min(6UL*1024*1024, limit); ++ max_wshare = min(16UL*1024*1024, limit); ++ max_rshare = min(16UL*1024*1024, limit); + + init_net.ipv4.sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; + init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024; +-- +https://clearlinux.org + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Arjan van de Ven +Date: Sun, 18 Feb 2018 23:35:41 +0000 +Subject: [PATCH] locking: rwsem: spin faster + +tweak rwsem owner spinning a bit +--- + kernel/locking/rwsem.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c +index f11b9bd..1bbfcc1 100644 +--- a/kernel/locking/rwsem.c ++++ b/kernel/locking/rwsem.c +@@ -717,6 +717,7 @@ rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable) + struct task_struct *new, *owner; + unsigned long flags, new_flags; + enum owner_state state; ++ int i = 0; + + owner = rwsem_owner_flags(sem, &flags); + state = rwsem_owner_state(owner, flags, nonspinnable); +@@ -750,7 +751,8 @@ rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable) + break; + } + +- cpu_relax(); ++ if (i++ > 1000) ++ cpu_relax(); + } + rcu_read_unlock(); + +-- +https://clearlinux.org + +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Arjan van de Ven +Date: Thu, 2 Jun 2016 23:36:32 -0500 +Subject: [PATCH] initialize ata before graphics + +ATA init is the long pole in the boot process, and its asynchronous. 
+move the graphics init after it so that ata and graphics initialize +in parallel +--- + drivers/Makefile | 15 ++++++++------- + 1 file changed, 8 insertions(+), 7 deletions(-) + +diff --git a/drivers/Makefile b/drivers/Makefile +index c0cd1b9..af1e2fb 100644 +--- a/drivers/Makefile ++++ b/drivers/Makefile +@@ -59,15 +59,8 @@ obj-y += char/ + # iommu/ comes before gpu as gpu are using iommu controllers + obj-y += iommu/ + +-# gpu/ comes after char for AGP vs DRM startup and after iommu +-obj-y += gpu/ +- + obj-$(CONFIG_CONNECTOR) += connector/ + +-# i810fb and intelfb depend on char/agp/ +-obj-$(CONFIG_FB_I810) += video/fbdev/i810/ +-obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/ +- + obj-$(CONFIG_PARPORT) += parport/ + obj-$(CONFIG_NVM) += lightnvm/ + obj-y += base/ block/ misc/ mfd/ nfc/ +@@ -80,6 +73,14 @@ obj-$(CONFIG_IDE) += ide/ + obj-y += scsi/ + obj-y += nvme/ + obj-$(CONFIG_ATA) += ata/ ++ ++# gpu/ comes after char for AGP vs DRM startup and after iommu ++obj-y += gpu/ ++ ++# i810fb and intelfb depend on char/agp/ ++obj-$(CONFIG_FB_I810) += video/fbdev/i810/ ++obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/ ++ + obj-$(CONFIG_TARGET_CORE) += target/ + obj-$(CONFIG_MTD) += mtd/ + obj-$(CONFIG_SPI) += spi/ +-- +https://clearlinux.org + diff --git a/linux-tkg-patches/5.13/0002-mm-Support-soft-dirty-flag-read-with-reset.patch b/linux-tkg-patches/5.13/0002-mm-Support-soft-dirty-flag-read-with-reset.patch new file mode 100644 index 0000000..7dfc910 --- /dev/null +++ b/linux-tkg-patches/5.13/0002-mm-Support-soft-dirty-flag-read-with-reset.patch @@ -0,0 +1,363 @@ +From 9c85113cf4019e7b277a44e72bda8b78347aa72f Mon Sep 17 00:00:00 2001 +From: Paul Gofman +Date: Thu, 7 May 2020 14:05:31 +0300 +Subject: [PATCH 2/2] mm: Support soft dirty flag read with reset. 
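The /proc/<pid>/pagemap_reset file added here pairs with the range reset from patch 1/2. As far as the hunks suggest, the caller seeks to (start >> PAGE_SHIFT) * 8, places the end address in the first 8 bytes of the read buffer, and reads back an array of page addresses whose soft-dirty bit was set (clearing it in the process). A hypothetical sketch, inferred from the diff rather than any documentation:

/* Illustration only: semantics inferred from the pagemap_reset hunks below. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

static long read_and_reset_dirty(pid_t pid, unsigned long start, unsigned long end,
                                 unsigned long *addrs, size_t max_addrs)
{
    char path[64];
    ssize_t n;
    int fd;

    snprintf(path, sizeof(path), "/proc/%d/pagemap_reset", (int)pid);
    fd = open(path, O_RDONLY);
    if (fd < 0)
        return -1;

    /* the file offset encodes the start address in 8-byte entries (4 KiB pages assumed) */
    if (lseek(fd, (off_t)(start >> 12) * 8, SEEK_SET) < 0) {
        close(fd);
        return -1;
    }

    memcpy(addrs, &end, sizeof(end));   /* end address is passed in the read buffer */
    n = read(fd, addrs, max_addrs * sizeof(*addrs));
    close(fd);
    return n < 0 ? -1 : n / (long)sizeof(*addrs);   /* count of dirty page addresses */
}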
+ +--- + fs/proc/base.c | 3 + + fs/proc/internal.h | 1 + + fs/proc/task_mmu.c | 144 +++++++++++++++++++++++++++++++++++++++------ + 3 files changed, 130 insertions(+), 18 deletions(-) + +diff --git a/fs/proc/base.c b/fs/proc/base.c +index b3422cda2a91..8199ae2411ca 100644 +--- a/fs/proc/base.c ++++ b/fs/proc/base.c +@@ -3202,6 +3202,9 @@ static const struct pid_entry tgid_base_stuff[] = { + REG("smaps", S_IRUGO, proc_pid_smaps_operations), + REG("smaps_rollup", S_IRUGO, proc_pid_smaps_rollup_operations), + REG("pagemap", S_IRUSR, proc_pagemap_operations), ++#ifdef CONFIG_MEM_SOFT_DIRTY ++ REG("pagemap_reset", S_IRUSR, proc_pagemap_reset_operations), ++#endif + #endif + #ifdef CONFIG_SECURITY + DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations), +diff --git a/fs/proc/internal.h b/fs/proc/internal.h +index f60b379dcdc7..36a901cf0e7f 100644 +--- a/fs/proc/internal.h ++++ b/fs/proc/internal.h +@@ -303,6 +303,7 @@ extern const struct file_operations proc_pid_smaps_operations; + extern const struct file_operations proc_pid_smaps_rollup_operations; + extern const struct file_operations proc_clear_refs_operations; + extern const struct file_operations proc_pagemap_operations; ++extern const struct file_operations proc_pagemap_reset_operations; + + extern unsigned long task_vsize(struct mm_struct *); + extern unsigned long task_statm(struct mm_struct *, +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index 7c7865028f10..a21694967915 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -1056,8 +1056,8 @@ static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, + return page_maybe_dma_pinned(page); + } + +-static inline void clear_soft_dirty(struct vm_area_struct *vma, +- unsigned long addr, pte_t *pte) ++static inline bool clear_soft_dirty(struct vm_area_struct *vma, ++ unsigned long addr, pte_t *pte) + { + /* + * The soft-dirty tracker uses #PF-s to catch writes +@@ -1066,37 +1066,46 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma, + * of how soft-dirty works. 
+ */ + pte_t ptent = *pte; ++ bool ret = false; + + if (pte_present(ptent)) { + pte_t old_pte; + + if (pte_is_pinned(vma, addr, ptent)) +- return; ++ return ret; + old_pte = ptep_modify_prot_start(vma, addr, pte); ++ ret = pte_soft_dirty(old_pte); + ptent = pte_wrprotect(old_pte); + ptent = pte_clear_soft_dirty(ptent); + ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent); + } else if (is_swap_pte(ptent)) { ++ ret = pte_swp_soft_dirty(ptent); + ptent = pte_swp_clear_soft_dirty(ptent); + set_pte_at(vma->vm_mm, addr, pte, ptent); + } ++ return ret; + } + #else +-static inline void clear_soft_dirty(struct vm_area_struct *vma, ++static inline bool clear_soft_dirty(struct vm_area_struct *vma, + unsigned long addr, pte_t *pte) + { ++ return false; + } + #endif + + #if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE) +-static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, ++static inline bool clear_soft_dirty_pmd(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp) + { + pmd_t old, pmd = *pmdp; ++ bool ret = false; + + if (pmd_present(pmd)) { + /* See comment in change_huge_pmd() */ + old = pmdp_invalidate(vma, addr, pmdp); ++ ++ ret = pmd_soft_dirty(old); ++ + if (pmd_dirty(old)) + pmd = pmd_mkdirty(pmd); + if (pmd_young(old)) +@@ -1107,14 +1116,17 @@ static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, + + set_pmd_at(vma->vm_mm, addr, pmdp, pmd); + } else if (is_migration_entry(pmd_to_swp_entry(pmd))) { ++ ret = pmd_swp_soft_dirty(pmd); + pmd = pmd_swp_clear_soft_dirty(pmd); + set_pmd_at(vma->vm_mm, addr, pmdp, pmd); + } ++ return ret; + } + #else +-static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, ++static inline bool clear_soft_dirty_pmd(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp) + { ++ return false; + } + #endif + +@@ -1367,6 +1379,7 @@ struct pagemapread { + int pos, len; /* units: PM_ENTRY_BYTES, not bytes */ + pagemap_entry_t *buffer; + bool show_pfn; ++ bool reset; + }; + + #define PAGEMAP_WALK_SIZE (PMD_SIZE) +@@ -1398,6 +1411,14 @@ static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme, + return 0; + } + ++static int add_addr_to_pagemap(unsigned long addr, struct pagemapread *pm) ++{ ++ ((unsigned long *)pm->buffer)[pm->pos++] = addr; ++ if (pm->pos >= pm->len) ++ return PM_END_OF_BUFFER; ++ return 0; ++} ++ + static int pagemap_pte_hole(unsigned long start, unsigned long end, + __always_unused int depth, struct mm_walk *walk) + { +@@ -1405,6 +1426,9 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end, + unsigned long addr = start; + int err = 0; + ++ if (pm->reset) ++ goto out; ++ + while (addr < end) { + struct vm_area_struct *vma = find_vma(walk->mm, addr); + pagemap_entry_t pme = make_pme(0, 0); +@@ -1439,8 +1463,9 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end, + } + + static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, +- struct vm_area_struct *vma, unsigned long addr, pte_t pte) ++ struct vm_area_struct *vma, unsigned long addr, pte_t *pte_addr) + { ++ pte_t pte = *pte_addr; + u64 frame = 0, flags = 0; + struct page *page = NULL; + +@@ -1493,6 +1518,20 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, + pmd_t pmd = *pmdp; + struct page *page = NULL; + ++ if (pm->reset) ++ { ++ if (clear_soft_dirty_pmd(vma, addr, pmdp)) ++ { ++ for (; addr != end; addr += PAGE_SIZE) ++ { ++ err = add_addr_to_pagemap(addr, pm); ++ if (err) ++ break; ++ } ++ } ++ goto trans_huge_done; ++ } ++ + 
if (vma->vm_flags & VM_SOFTDIRTY) + flags |= PM_SOFT_DIRTY; + +@@ -1541,6 +1580,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, + frame += (1 << MAX_SWAPFILES_SHIFT); + } + } ++trans_huge_done: + spin_unlock(ptl); + return err; + } +@@ -1555,10 +1595,18 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, + */ + orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl); + for (; addr < end; pte++, addr += PAGE_SIZE) { +- pagemap_entry_t pme; ++ if (pm->reset) ++ { ++ if (clear_soft_dirty(vma, addr, pte)) ++ err = add_addr_to_pagemap(addr, pm); ++ } ++ else ++ { ++ pagemap_entry_t pme; + +- pme = pte_to_pagemap_entry(pm, vma, addr, *pte); +- err = add_to_pagemap(addr, &pme, pm); ++ pme = pte_to_pagemap_entry(pm, vma, addr, pte); ++ err = add_to_pagemap(addr, &pme, pm); ++ } + if (err) + break; + } +@@ -1650,8 +1698,8 @@ static const struct mm_walk_ops pagemap_ops = { + * determine which areas of memory are actually mapped and llseek to + * skip over unmapped regions. + */ +-static ssize_t pagemap_read(struct file *file, char __user *buf, +- size_t count, loff_t *ppos) ++static ssize_t do_pagemap_read(struct file *file, char __user *buf, ++ size_t count, loff_t *ppos, bool reset) + { + struct mm_struct *mm = file->private_data; + struct pagemapread pm; +@@ -1660,6 +1708,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, + unsigned long start_vaddr; + unsigned long end_vaddr; + int ret = 0, copied = 0; ++ struct mmu_notifier_range range; ++ size_t buffer_len; + + if (!mm || !mmget_not_zero(mm)) + goto out; +@@ -1675,19 +1725,38 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, + + /* do not disclose physical addresses: attack vector */ + pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN); ++ pm.reset = reset; + +- pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); +- pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL); ++ buffer_len = min(PAGEMAP_WALK_SIZE >> PAGE_SHIFT, count / PM_ENTRY_BYTES); ++ ++ pm.buffer = kmalloc_array(buffer_len, PM_ENTRY_BYTES, GFP_KERNEL); + ret = -ENOMEM; + if (!pm.buffer) + goto out_mm; + + src = *ppos; + svpfn = src / PM_ENTRY_BYTES; +- end_vaddr = mm->task_size; ++ ++ start_vaddr = svpfn << PAGE_SHIFT; ++ ++ if (reset) ++ { ++ if (count < sizeof(end_vaddr)) ++ { ++ ret = -EINVAL; ++ goto out_mm; ++ } ++ if (copy_from_user(&end_vaddr, buf, sizeof(end_vaddr))) ++ return -EFAULT; ++ end_vaddr = min(end_vaddr, mm->task_size); ++ } ++ else ++ { ++ end_vaddr = mm->task_size; ++ start_vaddr = end_vaddr; ++ } + + /* watch out for wraparound */ +- start_vaddr = end_vaddr; + if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) + start_vaddr = untagged_addr(svpfn << PAGE_SHIFT); + +@@ -1707,18 +1776,35 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, + unsigned long end; + + pm.pos = 0; +- end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK; ++ pm.len = min(buffer_len, count / PM_ENTRY_BYTES); ++ ++ end = reset ? end_vaddr : (start_vaddr + (pm.len << PAGE_SHIFT)); + /* overflow ? 
*/ + if (end < start_vaddr || end > end_vaddr) + end = end_vaddr; ++ + ret = mmap_read_lock_killable(mm); + if (ret) + goto out_free; ++ ++ if (reset) ++ { ++ inc_tlb_flush_pending(mm); ++ mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY, ++ 0, NULL, mm, start_vaddr, end); ++ mmu_notifier_invalidate_range_start(&range); ++ } + ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm); ++ if (reset) ++ { ++ mmu_notifier_invalidate_range_end(&range); ++ flush_tlb_mm(mm); ++ dec_tlb_flush_pending(mm); ++ } + mmap_read_unlock(mm); +- start_vaddr = end; + + len = min(count, PM_ENTRY_BYTES * pm.pos); ++ BUG_ON(ret && ret != PM_END_OF_BUFFER); + if (copy_to_user(buf, pm.buffer, len)) { + ret = -EFAULT; + goto out_free; +@@ -1726,6 +1812,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, + copied += len; + buf += len; + count -= len; ++ ++ start_vaddr = reset && pm.pos == pm.len ? ((unsigned long *)pm.buffer)[pm.pos - 1] + PAGE_SIZE : end; + } + *ppos += copied; + if (!ret || ret == PM_END_OF_BUFFER) +@@ -1739,6 +1827,18 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, + return ret; + } + ++static ssize_t pagemap_read(struct file *file, char __user *buf, ++ size_t count, loff_t *ppos) ++{ ++ return do_pagemap_read(file, buf, count, ppos, false); ++} ++ ++static ssize_t pagemap_reset_read(struct file *file, char __user *buf, ++ size_t count, loff_t *ppos) ++{ ++ return do_pagemap_read(file, buf, count, ppos, true); ++} ++ + static int pagemap_open(struct inode *inode, struct file *file) + { + struct mm_struct *mm; +@@ -1765,6 +1865,14 @@ const struct file_operations proc_pagemap_operations = { + .open = pagemap_open, + .release = pagemap_release, + }; ++ ++const struct file_operations proc_pagemap_reset_operations = { ++ .llseek = mem_lseek, /* borrow this */ ++ .read = pagemap_reset_read, ++ .open = pagemap_open, ++ .release = pagemap_release, ++}; ++ + #endif /* CONFIG_PROC_PAGE_MONITOR */ + + #ifdef CONFIG_NUMA +-- +2.30.2 + diff --git a/linux-tkg-patches/5.13/0003-glitched-base.patch b/linux-tkg-patches/5.13/0003-glitched-base.patch new file mode 100644 index 0000000..3c48608 --- /dev/null +++ b/linux-tkg-patches/5.13/0003-glitched-base.patch @@ -0,0 +1,679 @@ +From f7f49141a5dbe9c99d78196b58c44307fb2e6be3 Mon Sep 17 00:00:00 2001 +From: Tk-Glitch +Date: Wed, 4 Jul 2018 04:30:08 +0200 +Subject: [PATCH 01/17] glitched + +--- + scripts/mkcompile_h | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h +index baf3ab8d9d49..854e32e6aec7 100755 +--- a/scripts/mkcompile_h ++++ b/scripts/mkcompile_h +@@ -41,8 +41,8 @@ else + fi + + UTS_VERSION="#$VERSION" +-CONFIG_FLAGS="" +-if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi ++CONFIG_FLAGS="TKG" ++if [ -n "$SMP" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS SMP"; fi + if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi + if [ -n "$PREEMPT_RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT_RT"; fi + +-- +2.28.0 + + +From c304f43d14e98d4bf1215fc10bc5012f554bdd8a Mon Sep 17 00:00:00 2001 +From: Alexandre Frade +Date: Mon, 29 Jan 2018 16:59:22 +0000 +Subject: [PATCH 02/17] dcache: cache_pressure = 50 decreases the rate at which + VFS caches are reclaimed + +Signed-off-by: Alexandre Frade +--- + fs/dcache.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/fs/dcache.c b/fs/dcache.c +index 361ea7ab30ea..0c5cf69b241a 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -71,7 +71,7 @@ + * If no ancestor relationship: + * arbitrary, 
since it's serialized on rename_lock + */ +-int sysctl_vfs_cache_pressure __read_mostly = 100; ++int sysctl_vfs_cache_pressure __read_mostly = 50; + EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); + + __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); +-- +2.28.0 + + +From 28f32f59d9d55ac7ec3a20b79bdd02d2a0a5f7e1 Mon Sep 17 00:00:00 2001 +From: Alexandre Frade +Date: Mon, 29 Jan 2018 18:29:13 +0000 +Subject: [PATCH 03/17] sched/core: nr_migrate = 128 increases number of tasks + to iterate in a single balance run. + +Signed-off-by: Alexandre Frade +--- + kernel/sched/core.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index f788cd61df21..2bfbb4213707 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -59,7 +59,7 @@ const_debug unsigned int sysctl_sched_features = + * Number of tasks to iterate in a single balance run. + * Limited because this is done with IRQs disabled. + */ +-const_debug unsigned int sysctl_sched_nr_migrate = 32; ++const_debug unsigned int sysctl_sched_nr_migrate = 128; + + /* + * period over which we measure -rt task CPU usage in us. +@@ -71,9 +71,9 @@ __read_mostly int scheduler_running; + + /* + * part of the period that we allow rt tasks to run in us. +- * default: 0.95s ++ * XanMod default: 0.98s + */ +-int sysctl_sched_rt_runtime = 950000; ++int sysctl_sched_rt_runtime = 980000; + + /* + * __task_rq_lock - lock the rq @p resides on. +-- +2.28.0 + + +From acc49f33a10f61dc66c423888cbb883ba46710e4 Mon Sep 17 00:00:00 2001 +From: Alexandre Frade +Date: Mon, 29 Jan 2018 17:41:29 +0000 +Subject: [PATCH 04/17] scripts: disable the localversion "+" tag of a git repo + +Signed-off-by: Alexandre Frade +--- + scripts/setlocalversion | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/scripts/setlocalversion b/scripts/setlocalversion +index 20f2efd57b11..0552d8b9f582 100755 +--- a/scripts/setlocalversion ++++ b/scripts/setlocalversion +@@ -54,7 +54,7 @@ scm_version() + # If only the short version is requested, don't bother + # running further git commands + if $short; then +- echo "+" ++ # echo "+" + return + fi + # If we are past a tagged commit (like +-- +2.28.0 + + +From 61fcb33fb0de8bc0f060e0a1ada38ed149217f4d Mon Sep 17 00:00:00 2001 +From: Oleksandr Natalenko +Date: Wed, 11 Dec 2019 11:46:19 +0100 +Subject: [PATCH 05/17] init/Kconfig: enable -O3 for all arches + +Building a kernel with -O3 may help in hunting bugs like [1] and thus +using this switch should not be restricted to one specific arch only. + +With that, lets expose it for everyone. + +[1] https://lore.kernel.org/lkml/673b885183fb64f1cbb3ed2387524077@natalenko.name/ + +Signed-off-by: Oleksandr Natalenko +--- + init/Kconfig | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/init/Kconfig b/init/Kconfig +index 0498af567f70..3ae8678e1145 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1278,7 +1278,6 @@ config CC_OPTIMIZE_FOR_PERFORMANCE + + config CC_OPTIMIZE_FOR_PERFORMANCE_O3 + bool "Optimize more for performance (-O3)" +- depends on ARC + help + Choosing this option will pass "-O3" to your compiler to optimize + the kernel yet more for performance. 
+-- +2.28.0 + + +From 360c6833e07cc9fdef5746f6bc45bdbc7212288d Mon Sep 17 00:00:00 2001 +From: "Jan Alexander Steffens (heftig)" +Date: Fri, 26 Oct 2018 11:22:33 +0100 +Subject: [PATCH 06/17] infiniband: Fix __read_overflow2 error with -O3 + inlining + +--- + drivers/infiniband/core/addr.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c +index 3a98439bba83..6efc4f907f58 100644 +--- a/drivers/infiniband/core/addr.c ++++ b/drivers/infiniband/core/addr.c +@@ -820,6 +820,7 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid, + union { + struct sockaddr_in _sockaddr_in; + struct sockaddr_in6 _sockaddr_in6; ++ struct sockaddr_ib _sockaddr_ib; + } sgid_addr, dgid_addr; + int ret; + +-- +2.28.0 + + +From f85ed068b4d0e6c31edce8574a95757a60e58b87 Mon Sep 17 00:00:00 2001 +From: Etienne Juvigny +Date: Mon, 3 Sep 2018 17:36:25 +0200 +Subject: [PATCH 07/17] Zenify & stuff + +--- + init/Kconfig | 32 ++++++++++++++++++++++++++++++++ + kernel/sched/fair.c | 25 +++++++++++++++++++++++++ + mm/page-writeback.c | 8 ++++++++ + 3 files changed, 65 insertions(+) + +diff --git a/init/Kconfig b/init/Kconfig +index 3ae8678e1145..da708eed0f1e 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -92,6 +92,38 @@ config THREAD_INFO_IN_TASK + + menu "General setup" + ++config ZENIFY ++ bool "A selection of patches from Zen/Liquorix kernel and additional tweaks for a better gaming experience" ++ default y ++ help ++ Tunes the kernel for responsiveness at the cost of throughput and power usage. ++ ++ --- Virtual Memory Subsystem --------------------------- ++ ++ Mem dirty before bg writeback..: 10 % -> 20 % ++ Mem dirty before sync writeback: 20 % -> 50 % ++ ++ --- Block Layer ---------------------------------------- ++ ++ Queue depth...............: 128 -> 512 ++ Default MQ scheduler......: mq-deadline -> bfq ++ ++ --- CFS CPU Scheduler ---------------------------------- ++ ++ Scheduling latency.............: 6 -> 3 ms ++ Minimal granularity............: 0.75 -> 0.3 ms ++ Wakeup granularity.............: 1 -> 0.5 ms ++ CPU migration cost.............: 0.5 -> 0.25 ms ++ Bandwidth slice size...........: 5 -> 3 ms ++ Ondemand fine upscaling limit..: 95 % -> 85 % ++ ++ --- MuQSS CPU Scheduler -------------------------------- ++ ++ Scheduling interval............: 6 -> 3 ms ++ ISO task max realtime use......: 70 % -> 25 % ++ Ondemand coarse upscaling limit: 80 % -> 45 % ++ Ondemand fine upscaling limit..: 95 % -> 45 % ++ + config BROKEN + bool + +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 6b3b59cc51d6..2a0072192c3d 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -37,8 +37,13 @@ + * + * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds) + */ ++#ifdef CONFIG_ZENIFY ++unsigned int sysctl_sched_latency = 3000000ULL; ++static unsigned int normalized_sysctl_sched_latency = 3000000ULL; ++#else + unsigned int sysctl_sched_latency = 6000000ULL; + static unsigned int normalized_sysctl_sched_latency = 6000000ULL; ++#endif + + /* + * The initial- and re-scaling of tunables is configurable +@@ -58,13 +63,22 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_L + * + * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) + */ ++#ifdef CONFIG_ZENIFY ++unsigned int sysctl_sched_min_granularity = 300000ULL; ++static unsigned int normalized_sysctl_sched_min_granularity = 300000ULL; ++#else + unsigned int sysctl_sched_min_granularity = 750000ULL; + static unsigned int 
normalized_sysctl_sched_min_granularity = 750000ULL; ++#endif + + /* + * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity + */ ++#ifdef CONFIG_ZENIFY ++static unsigned int sched_nr_latency = 10; ++#else + static unsigned int sched_nr_latency = 8; ++#endif + + /* + * After fork, child runs first. If set to 0 (default) then +@@ -81,10 +95,17 @@ unsigned int sysctl_sched_child_runs_first __read_mostly; + * + * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) + */ ++#ifdef CONFIG_ZENIFY ++unsigned int sysctl_sched_wakeup_granularity = 500000UL; ++static unsigned int normalized_sysctl_sched_wakeup_granularity = 500000UL; ++ ++const_debug unsigned int sysctl_sched_migration_cost = 50000UL; ++#else + unsigned int sysctl_sched_wakeup_granularity = 1000000UL; + static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; + + const_debug unsigned int sysctl_sched_migration_cost = 500000UL; ++#endif + + int sched_thermal_decay_shift; + static int __init setup_sched_thermal_decay_shift(char *str) +@@ -128,8 +149,12 @@ int __weak arch_asym_cpu_priority(int cpu) + * + * (default: 5 msec, units: microseconds) + */ ++#ifdef CONFIG_ZENIFY ++unsigned int sysctl_sched_cfs_bandwidth_slice = 3000UL; ++#else + unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; + #endif ++#endif + + static inline void update_load_add(struct load_weight *lw, unsigned long inc) + { +diff --git a/mm/page-writeback.c b/mm/page-writeback.c +index 28b3e7a67565..01a1aef2b9b1 100644 +--- a/mm/page-writeback.c ++++ b/mm/page-writeback.c +@@ -71,7 +71,11 @@ static long ratelimit_pages = 32; + /* + * Start background writeback (via writeback threads) at this percentage + */ ++#ifdef CONFIG_ZENIFY ++int dirty_background_ratio = 20; ++#else + int dirty_background_ratio = 10; ++#endif + + /* + * dirty_background_bytes starts at 0 (disabled) so that it is a function of +@@ -88,7 +92,11 @@ int vm_highmem_is_dirtyable; + /* + * The generator of dirty data starts writeback at this percentage + */ ++#ifdef CONFIG_ZENIFY ++int vm_dirty_ratio = 50; ++#else + int vm_dirty_ratio = 20; ++#endif + + /* + * vm_dirty_bytes starts at 0 (disabled) so that it is a function of +-- +2.28.0 + + +From e92e67143385cf285851e12aa8b7f083dd38dd24 Mon Sep 17 00:00:00 2001 +From: Steven Barrett +Date: Sun, 16 Jan 2011 18:57:32 -0600 +Subject: [PATCH 08/17] ZEN: Allow TCP YeAH as default congestion control + +4.4: In my tests YeAH dramatically slowed down transfers over a WLAN, + reducing throughput from ~65Mbps (CUBIC) to ~7MBps (YeAH) over 10 + seconds (netperf TCP_STREAM) including long stalls. + + Be careful when choosing this. 
~heftig +--- + net/ipv4/Kconfig | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig +index e64e59b536d3..bfb55ef7ebbe 100644 +--- a/net/ipv4/Kconfig ++++ b/net/ipv4/Kconfig +@@ -691,6 +691,9 @@ choice + config DEFAULT_VEGAS + bool "Vegas" if TCP_CONG_VEGAS=y + ++ config DEFAULT_YEAH ++ bool "YeAH" if TCP_CONG_YEAH=y ++ + config DEFAULT_VENO + bool "Veno" if TCP_CONG_VENO=y + +@@ -724,6 +727,7 @@ config DEFAULT_TCP_CONG + default "htcp" if DEFAULT_HTCP + default "hybla" if DEFAULT_HYBLA + default "vegas" if DEFAULT_VEGAS ++ default "yeah" if DEFAULT_YEAH + default "westwood" if DEFAULT_WESTWOOD + default "veno" if DEFAULT_VENO + default "reno" if DEFAULT_RENO +-- +2.28.0 + + +From 76dbe7477bfde1b5e8bf29a71b5af7ab2be9b98e Mon Sep 17 00:00:00 2001 +From: Steven Barrett +Date: Wed, 28 Nov 2018 19:01:27 -0600 +Subject: [PATCH 09/17] zen: Use [defer+madvise] as default khugepaged defrag + strategy + +For some reason, the default strategy to respond to THP fault fallbacks +is still just madvise, meaning stall if the program wants transparent +hugepages, but don't trigger a background reclaim / compaction if THP +begins to fail allocations. This creates a snowball affect where we +still use the THP code paths, but we almost always fail once a system +has been active and busy for a while. + +The option "defer" was created for interactive systems where THP can +still improve performance. If we have to fallback to a regular page due +to an allocation failure or anything else, we will trigger a background +reclaim and compaction so future THP attempts succeed and previous +attempts eventually have their smaller pages combined without stalling +running applications. + +We still want madvise to stall applications that explicitely want THP, +so defer+madvise _does_ make a ton of sense. Make it the default for +interactive systems, especially if the kernel maintainer left +transparent hugepages on "always". + +Reasoning and details in the original patch: https://lwn.net/Articles/711248/ +--- + mm/huge_memory.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 74300e337c3c..9277f22c10a7 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -53,7 +53,11 @@ unsigned long transparent_hugepage_flags __read_mostly = + #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE + (1< +Date: Wed, 24 Oct 2018 16:58:52 -0300 +Subject: [PATCH 10/17] net/sched: allow configuring cake qdisc as default + +Signed-off-by: Alexandre Frade +--- + net/sched/Kconfig | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/net/sched/Kconfig b/net/sched/Kconfig +index 84badf00647e..6a922bca9f39 100644 +--- a/net/sched/Kconfig ++++ b/net/sched/Kconfig +@@ -471,6 +471,9 @@ choice + config DEFAULT_SFQ + bool "Stochastic Fair Queue" if NET_SCH_SFQ + ++ config DEFAULT_CAKE ++ bool "Common Applications Kept Enhanced" if NET_SCH_CAKE ++ + config DEFAULT_PFIFO_FAST + bool "Priority FIFO Fast" + endchoice +@@ -481,6 +484,7 @@ config DEFAULT_NET_SCH + default "fq" if DEFAULT_FQ + default "fq_codel" if DEFAULT_FQ_CODEL + default "sfq" if DEFAULT_SFQ ++ default "cake" if DEFAULT_CAKE + default "pfifo_fast" + endif + +-- +2.28.0 + + +From 816ee502759e954304693813bd03d94986b28dba Mon Sep 17 00:00:00 2001 +From: Tk-Glitch +Date: Mon, 18 Feb 2019 17:40:57 +0100 +Subject: [PATCH 11/17] mm: Set watermark_scale_factor to 200 (from 10) + +Multiple users have reported it's helping reducing/eliminating stuttering +with DXVK. 
+--- + mm/page_alloc.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 898ff44f2c7b..e72074034793 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -330,7 +330,7 @@ int watermark_boost_factor __read_mostly; + #else + int watermark_boost_factor __read_mostly = 15000; + #endif +-int watermark_scale_factor = 10; ++int watermark_scale_factor = 200; + + static unsigned long nr_kernel_pages __initdata; + static unsigned long nr_all_pages __initdata; +-- +2.28.0 + + +From 90240bcd90a568878738e66c0d45bed3e38e347b Mon Sep 17 00:00:00 2001 +From: Tk-Glitch +Date: Fri, 19 Apr 2019 12:33:38 +0200 +Subject: [PATCH 12/17] Set vm.max_map_count to 262144 by default + +The value is still pretty low, and AMD64-ABI and ELF extended numbering +supports that, so we should be fine on modern x86 systems. + +This fixes crashes in some applications using more than 65535 vmas (also +affects some windows games running in wine, such as Star Citizen). +--- + include/linux/mm.h | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/include/linux/mm.h b/include/linux/mm.h +index bc05c3588aa3..b0cefe94920d 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -190,8 +190,7 @@ static inline void __mm_zero_struct_page(struct page *page) + * not a hard limit any more. Although some userspace tools can be surprised by + * that. + */ +-#define MAPCOUNT_ELF_CORE_MARGIN (5) +-#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) ++#define DEFAULT_MAX_MAP_COUNT (262144) + + extern int sysctl_max_map_count; + +-- +2.28.0 + + +From 3a34034dba5efe91bcec491efe8c66e8087f509b Mon Sep 17 00:00:00 2001 +From: Tk-Glitch +Date: Mon, 27 Jul 2020 00:19:18 +0200 +Subject: [PATCH 13/17] mm: bump DEFAULT_MAX_MAP_COUNT + +Some games such as Detroit: Become Human tend to be very crash prone with +lower values. +--- + include/linux/mm.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/linux/mm.h b/include/linux/mm.h +index b0cefe94920d..890165099b07 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -190,7 +190,7 @@ static inline void __mm_zero_struct_page(struct page *page) + * not a hard limit any more. Although some userspace tools can be surprised by + * that. + */ +-#define DEFAULT_MAX_MAP_COUNT (262144) ++#define DEFAULT_MAX_MAP_COUNT (524288) + + extern int sysctl_max_map_count; + +-- +2.28.0 + + +From 977812938da7c7226415778c340832141d9278b7 Mon Sep 17 00:00:00 2001 +From: Alexandre Frade +Date: Mon, 25 Nov 2019 15:13:06 -0300 +Subject: [PATCH 14/17] elevator: set default scheduler to bfq for blk-mq + +Signed-off-by: Alexandre Frade +--- + block/elevator.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/block/elevator.c b/block/elevator.c +index 4eab3d70e880..79669aa39d79 100644 +--- a/block/elevator.c ++++ b/block/elevator.c +@@ -623,16 +623,16 @@ static inline bool elv_support_iosched(struct request_queue *q) + } + + /* +- * For single queue devices, default to using mq-deadline. If we have multiple +- * queues or mq-deadline is not available, default to "none". ++ * For single queue devices, default to using bfq. If we have multiple ++ * queues or bfq is not available, default to "none". 
+ */ + static struct elevator_type *elevator_get_default(struct request_queue *q) + { + if (q->nr_hw_queues != 1 && + !blk_mq_is_sbitmap_shared(q->tag_set->flags)) + return NULL; + +- return elevator_get(q, "mq-deadline", false); ++ return elevator_get(q, "bfq", false); + } + + /* +-- +2.28.0 + +From 3c229f434aca65c4ca61772bc03c3e0370817b92 Mon Sep 17 00:00:00 2001 +From: Alexandre Frade +Date: Mon, 3 Aug 2020 17:05:04 +0000 +Subject: [PATCH 16/17] mm: set 2 megabytes for address_space-level file + read-ahead pages size + +Signed-off-by: Alexandre Frade +--- + include/linux/pagemap.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h +index cf2468da68e9..007dea784451 100644 +--- a/include/linux/pagemap.h ++++ b/include/linux/pagemap.h +@@ -655,7 +655,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); + void delete_from_page_cache_batch(struct address_space *mapping, + struct pagevec *pvec); + +-#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE) ++#define VM_READAHEAD_PAGES (SZ_2M / PAGE_SIZE) + + void page_cache_sync_readahead(struct address_space *, struct file_ra_state *, + struct file *, pgoff_t index, unsigned long req_count); +-- +2.28.0 + + +From 716f41cf6631f3a85834dcb67b4ce99185b6387f Mon Sep 17 00:00:00 2001 +From: Steven Barrett +Date: Wed, 15 Jan 2020 20:43:56 -0600 +Subject: [PATCH 17/17] ZEN: intel-pstate: Implement "enable" parameter + +If intel-pstate is compiled into the kernel, it will preempt the loading +of acpi-cpufreq so you can take advantage of hardware p-states without +any friction. + +However, intel-pstate is not completely superior to cpufreq's ondemand +for one reason. There's no concept of an up_threshold property. + +In ondemand, up_threshold essentially reduces the maximum utilization to +compare against, allowing you to hit max frequencies and turbo boost +from a much lower core utilization. + +With intel-pstate, you have the concept of minimum and maximum +performance, but no tunable that lets you define, maximum frequency +means 50% core utilization. For just this oversight, there's reasons +you may want ondemand. + +Lets support setting "enable" in kernel boot parameters. This lets +kernel maintainers include "intel_pstate=disable" statically in the +static boot parameters, but let users of the kernel override this +selection. 
+--- + Documentation/admin-guide/kernel-parameters.txt | 3 +++ + drivers/cpufreq/intel_pstate.c | 2 ++ + 2 files changed, 5 insertions(+) + +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index fb95fad81c79..3e92fee81e33 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -1857,6 +1857,9 @@ + disable + Do not enable intel_pstate as the default + scaling driver for the supported processors ++ enable ++ Enable intel_pstate in-case "disable" was passed ++ previously in the kernel boot parameters + passive + Use intel_pstate as a scaling driver, but configure it + to work with generic cpufreq governors (instead of +diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c +index 36a469150ff9..aee891c9b78a 100644 +--- a/drivers/cpufreq/intel_pstate.c ++++ b/drivers/cpufreq/intel_pstate.c +@@ -2845,6 +2845,8 @@ static int __init intel_pstate_setup(char *str) + pr_info("HWP disabled\n"); + no_hwp = 1; + } ++ if (!strcmp(str, "enable")) ++ no_load = 0; + if (!strcmp(str, "force")) + force_load = 1; + if (!strcmp(str, "hwp_only")) +-- +2.28.0 + diff --git a/linux-tkg-patches/5.13/0003-glitched-cfs.patch b/linux-tkg-patches/5.13/0003-glitched-cfs.patch new file mode 100644 index 0000000..06b7f02 --- /dev/null +++ b/linux-tkg-patches/5.13/0003-glitched-cfs.patch @@ -0,0 +1,72 @@ +diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz +index 2a202a846757..1d9c7ed79b11 100644 +--- a/kernel/Kconfig.hz ++++ b/kernel/Kconfig.hz +@@ -4,7 +4,7 @@ + + choice + prompt "Timer frequency" +- default HZ_250 ++ default HZ_500 + help + Allows the configuration of the timer frequency. It is customary + to have the timer interrupt run at 1000 Hz but 100 Hz may be more +@@ -39,6 +39,13 @@ choice + on SMP and NUMA systems and exactly dividing by both PAL and + NTSC frame rates for video and multimedia work. + ++ config HZ_500 ++ bool "500 HZ" ++ help ++ 500 Hz is a balanced timer frequency. Provides fast interactivity ++ on desktops with great smoothness without increasing CPU power ++ consumption and sacrificing the battery life on laptops. ++ + config HZ_1000 + bool "1000 HZ" + help +@@ -52,6 +59,7 @@ config HZ + default 100 if HZ_100 + default 250 if HZ_250 + default 300 if HZ_300 ++ default 500 if HZ_500 + default 1000 if HZ_1000 + + config SCHED_HRTICK + +diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz +index 2a202a846757..1d9c7ed79b11 100644 +--- a/kernel/Kconfig.hz ++++ b/kernel/Kconfig.hz +@@ -4,7 +4,7 @@ + + choice + prompt "Timer frequency" +- default HZ_500 ++ default HZ_750 + help + Allows the configuration of the timer frequency. It is customary + to have the timer interrupt run at 1000 Hz but 100 Hz may be more +@@ -46,6 +46,13 @@ choice + on desktops with great smoothness without increasing CPU power + consumption and sacrificing the battery life on laptops. + ++ config HZ_750 ++ bool "750 HZ" ++ help ++ 750 Hz is a good timer frequency for desktops. Provides fast ++ interactivity with great smoothness without sacrificing too ++ much throughput. 
++ + config HZ_1000 + bool "1000 HZ" + help +@@ -60,6 +67,7 @@ config HZ + default 250 if HZ_250 + default 300 if HZ_300 + default 500 if HZ_500 ++ default 750 if HZ_750 + default 1000 if HZ_1000 + + config SCHED_HRTICK + diff --git a/linux-tkg-patches/5.13/0005-glitched-pds.patch b/linux-tkg-patches/5.13/0005-glitched-pds.patch new file mode 100644 index 0000000..08c9ef3 --- /dev/null +++ b/linux-tkg-patches/5.13/0005-glitched-pds.patch @@ -0,0 +1,90 @@ +From f7f49141a5dbe9c99d78196b58c44307fb2e6be3 Mon Sep 17 00:00:00 2001 +From: Tk-Glitch +Date: Wed, 4 Jul 2018 04:30:08 +0200 +Subject: glitched - PDS + +diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz +index 2a202a846757..1d9c7ed79b11 100644 +--- a/kernel/Kconfig.hz ++++ b/kernel/Kconfig.hz +@@ -4,7 +4,7 @@ + + choice + prompt "Timer frequency" +- default HZ_250 ++ default HZ_500 + help + Allows the configuration of the timer frequency. It is customary + to have the timer interrupt run at 1000 Hz but 100 Hz may be more +@@ -39,6 +39,13 @@ choice + on SMP and NUMA systems and exactly dividing by both PAL and + NTSC frame rates for video and multimedia work. + ++ config HZ_500 ++ bool "500 HZ" ++ help ++ 500 Hz is a balanced timer frequency. Provides fast interactivity ++ on desktops with great smoothness without increasing CPU power ++ consumption and sacrificing the battery life on laptops. ++ + config HZ_1000 + bool "1000 HZ" + help +@@ -52,6 +59,7 @@ config HZ + default 100 if HZ_100 + default 250 if HZ_250 + default 300 if HZ_300 ++ default 500 if HZ_500 + default 1000 if HZ_1000 + + config SCHED_HRTICK + +diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz +index 2a202a846757..1d9c7ed79b11 100644 +--- a/kernel/Kconfig.hz ++++ b/kernel/Kconfig.hz +@@ -4,7 +4,7 @@ + + choice + prompt "Timer frequency" +- default HZ_500 ++ default HZ_750 + help + Allows the configuration of the timer frequency. It is customary + to have the timer interrupt run at 1000 Hz but 100 Hz may be more +@@ -46,6 +46,13 @@ choice + on desktops with great smoothness without increasing CPU power + consumption and sacrificing the battery life on laptops. + ++ config HZ_750 ++ bool "750 HZ" ++ help ++ 750 Hz is a good timer frequency for desktops. Provides fast ++ interactivity with great smoothness without sacrificing too ++ much throughput. ++ + config HZ_1000 + bool "1000 HZ" + help +@@ -60,6 +67,7 @@ config HZ + default 250 if HZ_250 + default 300 if HZ_300 + default 500 if HZ_500 ++ default 750 if HZ_750 + default 1000 if HZ_1000 + + config SCHED_HRTICK + +diff --git a/mm/vmscan.c b/mm/vmscan.c +index 9270a4370d54..30d01e647417 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -169,7 +169,7 @@ + /* + * From 0 .. 200. Higher means more swappy. 
+ */ +-int vm_swappiness = 60; ++int vm_swappiness = 20; + + static void set_task_reclaim_state(struct task_struct *task, + struct reclaim_state *rs) diff --git a/linux-tkg-patches/5.13/0006-add-acs-overrides_iommu.patch b/linux-tkg-patches/5.13/0006-add-acs-overrides_iommu.patch new file mode 100644 index 0000000..d1303a5 --- /dev/null +++ b/linux-tkg-patches/5.13/0006-add-acs-overrides_iommu.patch @@ -0,0 +1,193 @@ +From cdeab384f48dd9c88e2dff2e9ad8d57dca1a1b1c Mon Sep 17 00:00:00 2001 +From: Mark Weiman +Date: Sun, 12 Aug 2018 11:36:21 -0400 +Subject: [PATCH] pci: Enable overrides for missing ACS capabilities + +This an updated version of Alex Williamson's patch from: +https://lkml.org/lkml/2013/5/30/513 + +Original commit message follows: + +PCIe ACS (Access Control Services) is the PCIe 2.0+ feature that +allows us to control whether transactions are allowed to be redirected +in various subnodes of a PCIe topology. For instance, if two +endpoints are below a root port or downsteam switch port, the +downstream port may optionally redirect transactions between the +devices, bypassing upstream devices. The same can happen internally +on multifunction devices. The transaction may never be visible to the +upstream devices. + +One upstream device that we particularly care about is the IOMMU. If +a redirection occurs in the topology below the IOMMU, then the IOMMU +cannot provide isolation between devices. This is why the PCIe spec +encourages topologies to include ACS support. Without it, we have to +assume peer-to-peer DMA within a hierarchy can bypass IOMMU isolation. + +Unfortunately, far too many topologies do not support ACS to make this +a steadfast requirement. Even the latest chipsets from Intel are only +sporadically supporting ACS. We have trouble getting interconnect +vendors to include the PCIe spec required PCIe capability, let alone +suggested features. + +Therefore, we need to add some flexibility. The pcie_acs_override= +boot option lets users opt-in specific devices or sets of devices to +assume ACS support. The "downstream" option assumes full ACS support +on root ports and downstream switch ports. The "multifunction" +option assumes the subset of ACS features available on multifunction +endpoints and upstream switch ports are supported. The "id:nnnn:nnnn" +option enables ACS support on devices matching the provided vendor +and device IDs, allowing more strategic ACS overrides. These options +may be combined in any order. A maximum of 16 id specific overrides +are available. It's suggested to use the most limited set of options +necessary to avoid completely disabling ACS across the topology. +Note to hardware vendors, we have facilities to permanently quirk +specific devices which enforce isolation but not provide an ACS +capability. Please contact me to have your devices added and save +your customers the hassle of this boot option. + +Signed-off-by: Mark Weiman +--- + .../admin-guide/kernel-parameters.txt | 9 ++ + drivers/pci/quirks.c | 101 ++++++++++++++++++ + 2 files changed, 110 insertions(+) + +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index aefd358a5ca3..173b3596fd9e 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -3190,6 +3190,15 @@ + nomsi [MSI] If the PCI_MSI kernel config parameter is + enabled, this kernel boot option can be used to + disable the use of MSI interrupts system-wide. 
++ pcie_acs_override = ++ [PCIE] Override missing PCIe ACS support for: ++ downstream ++ All downstream ports - full ACS capabilities ++ multifunction ++ All multifunction devices - multifunction ACS subset ++ id:nnnn:nnnn ++ Specific device - full ACS capabilities ++ Specified as vid:did (vendor/device ID) in hex + noioapicquirk [APIC] Disable all boot interrupt quirks. + Safety option to keep boot IRQs enabled. This + should never be necessary. +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index 4700d24e5d55..8f7a3d7fd9c1 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -3372,6 +3372,106 @@ static void quirk_no_bus_reset(struct pci_dev *dev) + dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; + } + ++static bool acs_on_downstream; ++static bool acs_on_multifunction; ++ ++#define NUM_ACS_IDS 16 ++struct acs_on_id { ++ unsigned short vendor; ++ unsigned short device; ++}; ++static struct acs_on_id acs_on_ids[NUM_ACS_IDS]; ++static u8 max_acs_id; ++ ++static __init int pcie_acs_override_setup(char *p) ++{ ++ if (!p) ++ return -EINVAL; ++ ++ while (*p) { ++ if (!strncmp(p, "downstream", 10)) ++ acs_on_downstream = true; ++ if (!strncmp(p, "multifunction", 13)) ++ acs_on_multifunction = true; ++ if (!strncmp(p, "id:", 3)) { ++ char opt[5]; ++ int ret; ++ long val; ++ ++ if (max_acs_id >= NUM_ACS_IDS - 1) { ++ pr_warn("Out of PCIe ACS override slots (%d)\n", ++ NUM_ACS_IDS); ++ goto next; ++ } ++ ++ p += 3; ++ snprintf(opt, 5, "%s", p); ++ ret = kstrtol(opt, 16, &val); ++ if (ret) { ++ pr_warn("PCIe ACS ID parse error %d\n", ret); ++ goto next; ++ } ++ acs_on_ids[max_acs_id].vendor = val; ++ ++ p += strcspn(p, ":"); ++ if (*p != ':') { ++ pr_warn("PCIe ACS invalid ID\n"); ++ goto next; ++ } ++ ++ p++; ++ snprintf(opt, 5, "%s", p); ++ ret = kstrtol(opt, 16, &val); ++ if (ret) { ++ pr_warn("PCIe ACS ID parse error %d\n", ret); ++ goto next; ++ } ++ acs_on_ids[max_acs_id].device = val; ++ max_acs_id++; ++ } ++next: ++ p += strcspn(p, ","); ++ if (*p == ',') ++ p++; ++ } ++ ++ if (acs_on_downstream || acs_on_multifunction || max_acs_id) ++ pr_warn("Warning: PCIe ACS overrides enabled; This may allow non-IOMMU protected peer-to-peer DMA\n"); ++ ++ return 0; ++} ++early_param("pcie_acs_override", pcie_acs_override_setup); ++ ++static int pcie_acs_overrides(struct pci_dev *dev, u16 acs_flags) ++{ ++ int i; ++ ++ /* Never override ACS for legacy devices or devices with ACS caps */ ++ if (!pci_is_pcie(dev) || ++ pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS)) ++ return -ENOTTY; ++ ++ for (i = 0; i < max_acs_id; i++) ++ if (acs_on_ids[i].vendor == dev->vendor && ++ acs_on_ids[i].device == dev->device) ++ return 1; ++ ++ switch (pci_pcie_type(dev)) { ++ case PCI_EXP_TYPE_DOWNSTREAM: ++ case PCI_EXP_TYPE_ROOT_PORT: ++ if (acs_on_downstream) ++ return 1; ++ break; ++ case PCI_EXP_TYPE_ENDPOINT: ++ case PCI_EXP_TYPE_UPSTREAM: ++ case PCI_EXP_TYPE_LEG_END: ++ case PCI_EXP_TYPE_RC_END: ++ if (acs_on_multifunction && dev->multifunction) ++ return 1; ++ } ++ ++ return -ENOTTY; ++} + /* + * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset. 
+ * The device will throw a Link Down error on AER-capable systems and +@@ -4513,6 +4613,7 @@ static const struct pci_dev_acs_enabled { + { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs }, + /* Zhaoxin Root/Downstream Ports */ + { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs }, ++ { PCI_ANY_ID, PCI_ANY_ID, pcie_acs_overrides }, + { 0 } + }; + + diff --git a/linux-tkg-patches/5.13/0007-v5.13-winesync.patch b/linux-tkg-patches/5.13/0007-v5.13-winesync.patch new file mode 100644 index 0000000..6476c2b --- /dev/null +++ b/linux-tkg-patches/5.13/0007-v5.13-winesync.patch @@ -0,0 +1,3127 @@ +diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt +index 63fd4e6a014b..dc9a30b5f1a2 100644 +--- a/Documentation/admin-guide/devices.txt ++++ b/Documentation/admin-guide/devices.txt +@@ -376,8 +376,9 @@ + 240 = /dev/userio Serio driver testing device + 241 = /dev/vhost-vsock Host kernel driver for virtio vsock + 242 = /dev/rfkill Turning off radio transmissions (rfkill) ++ 243 = /dev/winesync Wine synchronization primitive device + +- 243-254 Reserved for local use ++ 244-254 Reserved for local use + 255 Reserved for MISC_DYNAMIC_MINOR + + 11 char Raw keyboard device (Linux/SPARC only) +diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst +index d29b020e5622..d2ddbf9ad8cb 100644 +--- a/Documentation/userspace-api/index.rst ++++ b/Documentation/userspace-api/index.rst +@@ -25,6 +25,7 @@ place where this information is gathered. + iommu + media/index + sysfs-platform_profile ++ winesync + + .. only:: subproject and html + +diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst +index 599bd4493944..825c6b2d2623 100644 +--- a/Documentation/userspace-api/ioctl/ioctl-number.rst ++++ b/Documentation/userspace-api/ioctl/ioctl-number.rst +@@ -366,6 +366,8 @@ Code Seq# Include File Comments + + 0xF6 all LTTng Linux Trace Toolkit Next Generation + ++0xF7 00-0F uapi/linux/winesync.h Wine synchronization primitives ++ + 0xFD all linux/dm-ioctl.h + 0xFE all linux/isst_if.h + ==== ===== ======================================================= ================================================================ +diff --git a/Documentation/userspace-api/winesync.rst b/Documentation/userspace-api/winesync.rst +new file mode 100644 +index 000000000000..751c70f1ffce +--- /dev/null ++++ b/Documentation/userspace-api/winesync.rst +@@ -0,0 +1,373 @@ ++===================================== ++Wine synchronization primitive driver ++===================================== ++ ++This page documents the user-space API for the winesync driver. ++ ++winesync is a support driver for emulation of NT synchronization ++primitives by the Wine project. It exists because implementation in ++user-space, using existing tools, cannot satisfy performance, ++correctness, and security constraints. It is implemented entirely in ++software, and does not drive any hardware device. ++ ++This interface is meant as a compatibility tool only and should not be ++used for general synchronization; instead use generic, versatile ++interfaces such as futex(2) and poll(2). ++ ++Synchronization primitives ++========================== ++ ++The winesync driver exposes two types of synchronization primitives, ++semaphores and mutexes. ++ ++A semaphore holds a single volatile 32-bit counter, and a static ++32-bit integer denoting the maximum value. It is considered signaled ++when the counter is nonzero. 
The counter is decremented by one when a ++wait is satisfied. Both the initial and maximum count are established ++when the semaphore is created. ++ ++A mutex holds a volatile 32-bit recursion count, and a volatile 32-bit ++identifier denoting its owner. The latter is intended to identify the ++thread holding the mutex; however, it is not actually validated ++against earlier calls made by the same thread. A mutex is considered ++signaled when its owner is zero (indicating that it is not owned). The ++recursion count is incremented when a wait is satisfied, and ownership ++is set to the given identifier. A mutex also holds an internal flag ++denoting whether its previous owner has died; such a mutex is said to ++be inconsistent. Owner death is not tracked automatically based on ++thread death, but rather must be communicated using ++``WINESYNC_IOC_KILL_OWNER``. ++ ++Objects are represented by signed 32-bit integers. A valid object ++identifier will always be nonnegative. ++ ++Char device ++=========== ++ ++The winesync driver creates a single char device /dev/winesync. Each ++file description opened on the device represents a unique namespace. ++That is, objects created on one open file description are shared ++across all its individual descriptors, but are not shared with other ++open() calls on the same device. ++ ++ioctl reference ++=============== ++ ++All operations on the device are done through ioctls. There are three ++structures used in ioctl calls:: ++ ++ struct winesync_sem_args { ++ __s32 sem; ++ __u32 count; ++ __u32 max; ++ __u32 flags; ++ }; ++ ++ struct winesync_mutex_args { ++ __s32 mutex; ++ __u32 owner; ++ __u32 count; ++ }; ++ ++ struct winesync_wait_args { ++ __u64 timeout; ++ __u64 objs; ++ __u32 count; ++ __u32 owner; ++ __u32 index; ++ __u32 pad; ++ }; ++ ++Depending on the ioctl, members of the structure may be used as input, ++output, or not at all. ++ ++All ioctls return 0 on success, and -1 on error, in which case `errno` ++will be set to a nonzero error code. ++ ++The ioctls are as follows: ++ ++.. c:macro:: WINESYNC_IOC_CREATE_SEM ++ ++ Create a semaphore object. Takes a pointer to struct ++ :c:type:`winesync_sem_args`, which is used as follows: ++ ++ ``count`` and ``max`` are input-only arguments, denoting the ++ initial and maximum count of the semaphore. ++ ++ ``flags`` is an input-only argument, which specifies additional ++ flags modifying the behaviour of the semaphore. There is only one ++ flag defined, ``WINESYNC_SEM_GETONWAIT``. If present, wait ++ operations on this semaphore will acquire it, decrementing its ++ count by one; otherwise, wait operations will not affect the ++ semaphore's state. ++ ++ ``sem`` is an output-only argument, which will be filled with the ++ allocated identifier if successful. ++ ++ Fails with ``EINVAL`` if ``count`` is greater than ``max``, or ++ ``ENOMEM`` if not enough memory is available. ++ ++.. c:macro:: WINESYNC_IOC_CREATE_MUTEX ++ ++ Create a mutex object. Takes a pointer to struct ++ :c:type:`winesync_mutex_args`, which is used as follows: ++ ++ ``owner`` is an input-only argument denoting the initial owner of ++ the mutex. ++ ++ ``count`` is an input-only argument denoting the initial recursion ++ count of the mutex. If ``owner`` is nonzero and ``count`` is zero, ++ or if ``owner`` is zero and ``count`` is nonzero, the function ++ fails with ``EINVAL``. ++ ++ ``mutex`` is an output-only argument, which will be filled with ++ the allocated identifier if successful. 
++ ++ Fails with ``ENOMEM`` if not enough memory is available. ++ ++.. c:macro:: WINESYNC_IOC_DELETE ++ ++ Delete an object of any type. Takes an input-only pointer to a ++ 32-bit integer denoting the object to delete. Fails with ``EINVAL`` ++ if the object is not valid. Further ioctls attempting to use the ++ object return ``EINVAL``, unless the object identifier is reused. ++ However, wait ioctls currently in progress are not interrupted, and ++ behave as if the object remains valid. ++ ++.. c:macro:: WINESYNC_IOC_PUT_SEM ++ ++ Post to a semaphore object. Takes a pointer to struct ++ :c:type:`winesync_sem_args`, which is used as follows: ++ ++ ``sem`` is an input-only argument denoting the semaphore object. ++ If ``sem`` is not a valid semaphore object, the ioctl fails with ++ ``EINVAL``. ++ ++ ``count`` contains on input the count to add to the semaphore, and ++ on output is filled with its previous count. ++ ++ ``max`` and ``flags`` are not used. ++ ++ The operation is atomic and totally ordered with respect to other ++ operations on the same semaphore. If adding ``count`` to the ++ semaphore's current count would raise the latter past the ++ semaphore's maximum count, the ioctl fails with ``EOVERFLOW`` and ++ the semaphore is not affected. If raising the semaphore's count ++ causes it to become signaled, eligible threads waiting on this ++ semaphore will be woken and the semaphore's count decremented ++ appropriately. ++ ++.. c:macro:: WINESYNC_IOC_PULSE_SEM ++ ++ This operation is identical to ``WINESYNC_IOC_PUT_SEM``, with one ++ notable exception: the semaphore is always left in an *unsignaled* ++ state, regardless of the initial count or the count added by the ++ ioctl. That is, the count after a pulse operation will always be ++ zero. The entire operation is atomic. ++ ++ Hence, if the semaphore was created with the ++ ``WINESYNC_SEM_GETONWAIT`` flag set, and an unsignaled semaphore is ++ "pulsed" with a count of 2, at most two eligible threads (i.e. ++ threads not otherwise constrained due to ``WINESYNC_IOC_WAIT_ALL``) ++ will be woken up, and any others will remain sleeping. If less than ++ two eligible threads are waiting on the semaphore, all of them will ++ be woken up, and the semaphore's count will remain at zero. On the ++ other hand, if the semaphore was created without the ++ ``WINESYNC_SEM_GETONWAIT``, all eligible threads will be woken up, ++ making ``count`` effectively redundant. In either case, a ++ simultaneous ``WINESYNC_IOC_READ_SEM`` ioctl from another thread ++ will always report a count of zero. ++ ++ If adding ``count`` to the semaphore's current count would raise the ++ latter past the semaphore's maximum count, the ioctl fails with ++ ``EOVERFLOW``. However, in this case the semaphore's count will ++ still be reset to zero. ++ ++.. c:macro:: WINESYNC_IOC_GET_SEM ++ ++ Attempt to acquire a semaphore object. Takes an input-only pointer ++ to a 32-bit integer denoting the semaphore to acquire. ++ ++ This operation does not block. If the semaphore's count was zero, it ++ fails with ``EWOULDBLOCK``. Otherwise, the semaphore's count is ++ decremented by one. The behaviour of this operation is unaffected by ++ whether the semaphore was created with the ++ ``WINESYNC_SEM_GETONWAIT`` flag set. ++ ++ The operation is atomic and totally ordered with respect to other ++ operations on the same semaphore. ++ ++.. c:macro:: WINESYNC_IOC_PUT_MUTEX ++ ++ Release a mutex object. 
Takes a pointer to struct ++ :c:type:`winesync_mutex_args`, which is used as follows: ++ ++ ``mutex`` is an input-only argument denoting the mutex object. If ++ ``mutex`` is not a valid mutex object, the ioctl fails with ++ ``EINVAL``. ++ ++ ``owner`` is an input-only argument denoting the mutex owner. ++ ``owner`` must be nonzero, else the ioctl fails with ``EINVAL``. ++ If ``owner`` is not the current owner of the mutex, the ioctl ++ fails with ``EPERM``. ++ ++ ``count`` is an output-only argument which will be filled on ++ success with the mutex's previous recursion count. ++ ++ The mutex's count will be decremented by one. The operation is ++ atomic and totally ordered with respect to other operations on the ++ same mutex. If decrementing the mutex's count causes it to become ++ zero, the mutex is marked as unowned and signaled, and eligible ++ threads waiting on it will be woken as appropriate. ++ ++.. c:macro:: WINESYNC_IOC_READ_SEM ++ ++ Read the current state of a semaphore object. Takes a pointer to ++ struct :c:type:`winesync_sem_args`, which is used as follows: ++ ++ ``sem`` is an input-only argument denoting the semaphore object. ++ If ``sem`` is not a valid semaphore object, the ioctl fails with ++ ``EINVAL``. ++ ++ ``count`` and ``max`` are output-only arguments, which will be ++ filled with the current and maximum count of the given semaphore. ++ ++ ``flags`` is an output-only argument, which will be filled with ++ the flags used to create the semaphore. ++ ++ The operation is atomic and totally ordered with respect to other ++ operations on the same semaphore. ++ ++.. c:macro:: WINESYNC_IOC_READ_MUTEX ++ ++ Read the current state of a mutex object. Takes a pointer to struct ++ :c:type:`winesync_mutex_args`, which is used as follows: ++ ++ ``mutex`` is an input-only argument denoting the mutex object. If ++ ``mutex`` is not a valid mutex object, the ioctl fails with ++ ``EINVAL``. ++ ++ ``count`` and ``owner`` are output-only arguments, which will be ++ filled with the current recursion count and owner of the given ++ mutex. If the mutex is not owned, both ``count`` and ``owner`` are ++ set to zero. ++ ++ If the mutex is marked as inconsistent, the function fails with ++ ``EOWNERDEAD``. ++ ++ The operation is atomic and totally ordered with respect to other ++ operations on the same mutex. ++ ++.. c:macro:: WINESYNC_IOC_KILL_OWNER ++ ++ Mark any mutexes owned by the given identifier as unowned and ++ inconsistent. Takes an input-only pointer to a 32-bit integer ++ denoting the owner. If the owner is zero, the ioctl fails with ++ ``EINVAL``. ++ ++.. c:macro:: WINESYNC_IOC_WAIT_ANY ++ ++ Poll on any of a list of objects, atomically acquiring (at most) ++ one. Takes a pointer to struct :c:type:`winesync_wait_args`, which ++ is used as follows: ++ ++ ``timeout`` is an optional input-only pointer to a 64-bit struct ++ :c:type:`timespec` (specified as an integer so that the structure ++ has the same size regardless of architecture). The timeout is ++ specified in absolute format, as measured against the MONOTONIC ++ clock. If the timeout is equal to or earlier than the current ++ time, the function returns immediately without sleeping. If ++ ``timeout`` is zero, i.e. NULL, the function will sleep until an ++ object is signaled, and will not fail with ``ETIMEDOUT``. ++ ++ ``objs`` is a input-only pointer to an array of ``count`` 32-bit ++ object identifiers (specified as an integer so that the structure ++ has the same size regardless of architecture). 
If any identifier ++ is invalid, the function fails with ``EINVAL``. ++ ++ ``count`` is an input-only argument denoting the number of ++ elements in ``objs``. ++ ++ ``owner`` is an input-only argument denoting the mutex owner ++ identifier. If any object in ``objs`` is a mutex, the ioctl will ++ attempt to acquire that mutex on behalf of ``owner``. If ``owner`` ++ is zero, the ioctl fails with ``EINVAL``. ++ ++ ``index`` is an output-only argument which, if the ioctl is ++ successful, is filled with the index of the object actually ++ signaled. ++ ++ ``pad`` is unused, and exists to keep a consistent structure size. ++ ++ This function attempts to acquire one of the given objects. If ++ unable to do so, it sleeps until an object becomes signaled, ++ subsequently acquiring it, or the timeout expires. In the latter ++ case the ioctl fails with ``ETIMEDOUT``. The function only acquires ++ one object, even if multiple objects are signaled. ++ ++ A semaphore is considered to be signaled if its count is nonzero. It ++ is acquired by decrementing its count by one if the ++ ``WINESYNC_SEM_GETONWAIT`` flag was used to create it; otherwise no ++ operation is done to acquire the semaphore. A mutex is considered to ++ be signaled if it is unowned or if its owner matches the ``owner`` ++ argument, and is acquired by incrementing its recursion count by one ++ and setting its owner to the ``owner`` argument. ++ ++ Acquisition is atomic and totally ordered with respect to other ++ operations on the same object. If two wait operations (with ++ different ``owner`` identifiers) are queued on the same mutex, only ++ one is signaled. If two wait operations are queued on the same ++ semaphore (which was not created with the ``WINESYNC_SEM_GETONWAIT`` ++ flag set), and a value of one is posted to it, only one is signaled. ++ The order in which threads are signaled is not guaranteed. ++ ++ (If two wait operations are queued on the same semaphore, and the ++ semaphore was created with the ``WINESYNC_SEM_GETONWAIT`` flag set, ++ and a value of one is posted to it, both threads are signaled, and ++ the semaphore retains a count of one.) ++ ++ If an inconsistent mutex is acquired, the ioctl fails with ++ ``EOWNERDEAD``. Although this is a failure return, the function may ++ otherwise be considered successful. The mutex is marked as owned by ++ the given owner (with a recursion count of 1) and as no longer ++ inconsistent. ``index`` is still set to the index of the mutex. ++ ++ Unlike ``WINESYNC_IOC_WAIT_ALL``, it is valid to pass the same ++ object more than once. If a wakeup occurs due to that object being ++ signaled, ``index`` is set to the index of the first instance of the ++ object. ++ ++ Fails with ``ENOMEM`` if not enough memory is available, or ++ ``EINTR`` if a signal is received. ++ ++.. c:macro:: WINESYNC_IOC_WAIT_ALL ++ ++ Poll on a list of objects, atomically acquiring all of them. Takes a ++ pointer to struct :c:type:`winesync_wait_args`, which is used ++ identically to ``WINESYNC_IOC_WAIT_ANY``, except that ``index`` is ++ unused. ++ ++ This function attempts to simultaneously acquire all of the given ++ objects. If unable to do so, it sleeps until all objects become ++ simultaneously signaled, subsequently acquiring them, or the timeout ++ expires. In the latter case the ioctl fails with ``ETIMEDOUT`` and ++ no objects are modified. ++ ++ Objects may become signaled and subsequently designaled (through ++ acquisition by other threads) while this thread is sleeping. 
Only ++ once all objects are simultaneously signaled does the ioctl return. ++ The acquisition is atomic and totally ordered with respect to other ++ operations on any of the given objects. ++ ++ If an inconsistent mutex is acquired, the ioctl fails with ++ ``EOWNERDEAD``. Similarly to ``WINESYNC_IOC_WAIT_ANY``, all objects ++ are nevertheless marked as acquired. Note that if multiple mutex ++ objects are specified, there is no way to know which were marked as ++ inconsistent. ++ ++ Unlike ``WINESYNC_IOC_WAIT_ALL``, it is not valid to pass the same ++ object more than once. If this is attempted, the function fails with ++ ``EINVAL``. ++ ++ Fails with ``ENOMEM`` if not enough memory is available, or ++ ``EINTR`` if a signal is received. +diff --git a/MAINTAINERS b/MAINTAINERS +index 9450e052f1b1..d33e317759bf 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -19351,6 +19351,15 @@ M: David Härdeman + S: Maintained + F: drivers/media/rc/winbond-cir.c + ++WINESYNC SYNCHRONIZATION PRIMITIVE DRIVER ++M: Zebediah Figura ++L: wine-devel@winehq.org ++S: Supported ++F: Documentation/userspace-api/winesync.rst ++F: drivers/misc/winesync.c ++F: include/uapi/linux/winesync.c ++F: tools/testing/selftests/drivers/winesync/ ++ + WINSYSTEMS EBC-C384 WATCHDOG DRIVER + M: William Breathitt Gray + L: linux-watchdog@vger.kernel.org +diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig +index f532c59bb59b..496f20d69914 100644 +--- a/drivers/misc/Kconfig ++++ b/drivers/misc/Kconfig +@@ -445,6 +445,17 @@ config HISI_HIKEY_USB + switching between the dual-role USB-C port and the USB-A host ports + using only one USB controller. + ++config WINESYNC ++ tristate "Synchronization primitives for Wine" ++ help ++ This module provides kernel support for synchronization primitives ++ used by Wine. It is not a hardware driver. ++ ++ To compile this driver as a module, choose M here: the ++ module will be called winesync. ++ ++ If unsure, say N. ++ + source "drivers/misc/c2port/Kconfig" + source "drivers/misc/eeprom/Kconfig" + source "drivers/misc/cb710/Kconfig" +diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile +index 99b6f15a3c70..327c9628c479 100644 +--- a/drivers/misc/Makefile ++++ b/drivers/misc/Makefile +@@ -56,3 +56,4 @@ obj-$(CONFIG_HABANA_AI) += habanalabs/ + obj-$(CONFIG_UACCE) += uacce/ + obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o + obj-$(CONFIG_HISI_HIKEY_USB) += hisi_hikey_usb.o ++obj-$(CONFIG_WINESYNC) += winesync.o +diff --git a/drivers/misc/winesync.c b/drivers/misc/winesync.c +new file mode 100644 +index 000000000000..ff5749206aa9 +--- /dev/null ++++ b/drivers/misc/winesync.c +@@ -0,0 +1,1047 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * winesync.c - Kernel driver for Wine synchronization primitives ++ * ++ * Copyright (C) 2021 Zebediah Figura ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define WINESYNC_NAME "winesync" ++ ++enum winesync_type { ++ WINESYNC_TYPE_SEM, ++ WINESYNC_TYPE_MUTEX, ++}; ++ ++struct winesync_obj { ++ struct kref refcount; ++ spinlock_t lock; ++ ++ /* ++ * any_waiters is protected by the object lock, but all_waiters is ++ * protected by the device wait_all_lock. ++ */ ++ struct list_head any_waiters; ++ struct list_head all_waiters; ++ ++ /* ++ * Hint describing how many tasks are queued on this object in a ++ * wait-all operation. ++ * ++ * Any time we do a wake, we may need to wake "all" waiters as well as ++ * "any" waiters. 
In order to atomically wake "all" waiters, we must ++ * lock all of the objects, and that means grabbing the wait_all_lock ++ * below (and, due to lock ordering rules, before locking this object). ++ * However, wait-all is a rare operation, and grabbing the wait-all ++ * lock for every wake would create unnecessary contention. Therefore we ++ * first check whether all_hint is one, and, if it is, we skip trying ++ * to wake "all" waiters. ++ * ++ * This "refcount" isn't protected by any lock. It might change during ++ * the course of a wake, but there's no meaningful race there; it's only ++ * a hint. ++ * ++ * Use refcount_t rather than atomic_t to take advantage of saturation. ++ * This does mean that the "no waiters" case is signified by all_hint ++ * being one, rather than zero (otherwise we would get spurious ++ * warnings). ++ */ ++ refcount_t all_hint; ++ ++ enum winesync_type type; ++ ++ /* The following fields are protected by the object lock. */ ++ union { ++ struct { ++ __u32 count; ++ __u32 max; ++ __u32 flags; ++ } sem; ++ struct { ++ __u32 count; ++ __u32 owner; ++ bool ownerdead; ++ } mutex; ++ } u; ++}; ++ ++struct winesync_q_entry { ++ struct list_head node; ++ struct winesync_q *q; ++ struct winesync_obj *obj; ++ __u32 index; ++}; ++ ++struct winesync_q { ++ struct task_struct *task; ++ __u32 owner; ++ ++ /* ++ * Protected via atomic_cmpxchg(). Only the thread that wins the ++ * compare-and-swap may actually change object states and wake this ++ * task. ++ */ ++ atomic_t signaled; ++ ++ bool all; ++ bool ownerdead; ++ __u32 count; ++ struct winesync_q_entry entries[]; ++}; ++ ++struct winesync_device { ++ /* ++ * Wait-all operations must atomically grab all objects, and be totally ++ * ordered with respect to each other and wait-any operations. If one ++ * thread is trying to acquire several objects, another thread cannot ++ * touch the object at the same time. ++ * ++ * We achieve this by grabbing multiple object locks at the same time. ++ * However, this creates a lock ordering problem. To solve that problem, ++ * wait_all_lock is taken first whenever multiple objects must be locked ++ * at the same time. 
++ */ ++ spinlock_t wait_all_lock; ++ struct mutex table_lock; ++ struct idr objects; ++}; ++ ++static struct winesync_obj *get_obj(struct winesync_device *dev, int id) ++{ ++ struct winesync_obj *obj; ++ ++ rcu_read_lock(); ++ obj = idr_find(&dev->objects, id); ++ if (obj && !kref_get_unless_zero(&obj->refcount)) ++ obj = NULL; ++ rcu_read_unlock(); ++ ++ return obj; ++} ++ ++static void destroy_obj(struct kref *ref) ++{ ++ struct winesync_obj *obj; ++ ++ obj = container_of(ref, struct winesync_obj, refcount); ++ kfree(obj); ++} ++ ++static void put_obj(struct winesync_obj *obj) ++{ ++ kref_put(&obj->refcount, destroy_obj); ++} ++ ++static int winesync_char_open(struct inode *inode, struct file *file) ++{ ++ struct winesync_device *dev; ++ ++ dev = kzalloc(sizeof(*dev), GFP_KERNEL); ++ if (!dev) ++ return -ENOMEM; ++ ++ idr_init(&dev->objects); ++ spin_lock_init(&dev->wait_all_lock); ++ mutex_init(&dev->table_lock); ++ ++ file->private_data = dev; ++ return nonseekable_open(inode, file); ++} ++ ++static int winesync_char_release(struct inode *inode, struct file *file) ++{ ++ struct winesync_device *dev = file->private_data; ++ struct winesync_obj *obj; ++ int id; ++ ++ mutex_lock(&dev->table_lock); ++ idr_for_each_entry(&dev->objects, obj, id) { ++ idr_remove(&dev->objects, id); ++ synchronize_rcu(); ++ put_obj(obj); ++ } ++ mutex_unlock(&dev->table_lock); ++ ++ kfree(dev); ++ ++ return 0; ++} ++ ++static void winesync_init_obj(struct winesync_obj *obj) ++{ ++ kref_init(&obj->refcount); ++ refcount_set(&obj->all_hint, 1); ++ spin_lock_init(&obj->lock); ++ INIT_LIST_HEAD(&obj->any_waiters); ++ INIT_LIST_HEAD(&obj->all_waiters); ++} ++ ++static bool is_signaled(struct winesync_obj *obj, __u32 owner) ++{ ++ lockdep_assert_held(&obj->lock); ++ ++ switch (obj->type) { ++ case WINESYNC_TYPE_SEM: ++ return !!obj->u.sem.count; ++ case WINESYNC_TYPE_MUTEX: ++ if (obj->u.mutex.owner && obj->u.mutex.owner != owner) ++ return false; ++ return obj->u.mutex.count < UINT_MAX; ++ } ++ ++ WARN(1, "bad object type %#x\n", obj->type); ++ return false; ++} ++ ++/* ++ * "locked_obj" is an optional pointer to an object which is already locked and ++ * should not be locked again. This is necessary so that changing an object's ++ * state and waking it can be a single atomic operation. 
++ */ ++static void try_wake_all(struct winesync_device *dev, struct winesync_q *q, ++ struct winesync_obj *locked_obj) ++{ ++ __u32 count = q->count; ++ bool can_wake = true; ++ __u32 i; ++ ++ lockdep_assert_held(&dev->wait_all_lock); ++ if (locked_obj) ++ lockdep_assert_held(&locked_obj->lock); ++ ++ for (i = 0; i < count; i++) { ++ if (q->entries[i].obj != locked_obj) ++ spin_lock(&q->entries[i].obj->lock); ++ } ++ ++ for (i = 0; i < count; i++) { ++ if (!is_signaled(q->entries[i].obj, q->owner)) { ++ can_wake = false; ++ break; ++ } ++ } ++ ++ if (can_wake && atomic_cmpxchg(&q->signaled, -1, 0) == -1) { ++ for (i = 0; i < count; i++) { ++ struct winesync_obj *obj = q->entries[i].obj; ++ ++ switch (obj->type) { ++ case WINESYNC_TYPE_SEM: ++ if (obj->u.sem.flags & WINESYNC_SEM_GETONWAIT) ++ obj->u.sem.count--; ++ break; ++ case WINESYNC_TYPE_MUTEX: ++ if (obj->u.mutex.ownerdead) ++ q->ownerdead = true; ++ obj->u.mutex.ownerdead = false; ++ obj->u.mutex.count++; ++ obj->u.mutex.owner = q->owner; ++ break; ++ } ++ } ++ wake_up_process(q->task); ++ } ++ ++ for (i = 0; i < count; i++) { ++ if (q->entries[i].obj != locked_obj) ++ spin_unlock(&q->entries[i].obj->lock); ++ } ++} ++ ++static void try_wake_all_obj(struct winesync_device *dev, ++ struct winesync_obj *obj) ++{ ++ struct winesync_q_entry *entry; ++ ++ lockdep_assert_held(&dev->wait_all_lock); ++ lockdep_assert_held(&obj->lock); ++ ++ list_for_each_entry(entry, &obj->all_waiters, node) ++ try_wake_all(dev, entry->q, obj); ++} ++ ++static void try_wake_any_sem(struct winesync_obj *sem) ++{ ++ struct winesync_q_entry *entry; ++ ++ lockdep_assert_held(&sem->lock); ++ ++ list_for_each_entry(entry, &sem->any_waiters, node) { ++ struct winesync_q *q = entry->q; ++ ++ if (!sem->u.sem.count) ++ break; ++ ++ if (atomic_cmpxchg(&q->signaled, -1, entry->index) == -1) { ++ if (sem->u.sem.flags & WINESYNC_SEM_GETONWAIT) ++ sem->u.sem.count--; ++ wake_up_process(q->task); ++ } ++ } ++} ++ ++static void try_wake_any_mutex(struct winesync_obj *mutex) ++{ ++ struct winesync_q_entry *entry; ++ ++ lockdep_assert_held(&mutex->lock); ++ ++ list_for_each_entry(entry, &mutex->any_waiters, node) { ++ struct winesync_q *q = entry->q; ++ ++ if (mutex->u.mutex.count == UINT_MAX) ++ break; ++ if (mutex->u.mutex.owner && mutex->u.mutex.owner != q->owner) ++ continue; ++ ++ if (atomic_cmpxchg(&q->signaled, -1, entry->index) == -1) { ++ if (mutex->u.mutex.ownerdead) ++ q->ownerdead = true; ++ mutex->u.mutex.ownerdead = false; ++ mutex->u.mutex.count++; ++ mutex->u.mutex.owner = q->owner; ++ wake_up_process(q->task); ++ } ++ } ++} ++ ++static int winesync_create_sem(struct winesync_device *dev, void __user *argp) ++{ ++ struct winesync_sem_args __user *user_args = argp; ++ struct winesync_sem_args args; ++ struct winesync_obj *sem; ++ int ret; ++ ++ if (copy_from_user(&args, argp, sizeof(args))) ++ return -EFAULT; ++ ++ if (args.count > args.max) ++ return -EINVAL; ++ ++ if (args.flags & ~WINESYNC_SEM_GETONWAIT) ++ return -EINVAL; ++ ++ sem = kzalloc(sizeof(*sem), GFP_KERNEL); ++ if (!sem) ++ return -ENOMEM; ++ ++ winesync_init_obj(sem); ++ sem->type = WINESYNC_TYPE_SEM; ++ sem->u.sem.count = args.count; ++ sem->u.sem.max = args.max; ++ sem->u.sem.flags = args.flags; ++ ++ mutex_lock(&dev->table_lock); ++ ret = idr_alloc(&dev->objects, sem, 0, 0, GFP_KERNEL); ++ mutex_unlock(&dev->table_lock); ++ ++ if (ret < 0) { ++ kfree(sem); ++ return ret; ++ } ++ ++ return put_user(ret, &user_args->sem); ++} ++ ++static int winesync_create_mutex(struct winesync_device *dev, 
void __user *argp) ++{ ++ struct winesync_mutex_args __user *user_args = argp; ++ struct winesync_mutex_args args; ++ struct winesync_obj *mutex; ++ int ret; ++ ++ if (copy_from_user(&args, argp, sizeof(args))) ++ return -EFAULT; ++ ++ if (!args.owner != !args.count) ++ return -EINVAL; ++ ++ mutex = kzalloc(sizeof(*mutex), GFP_KERNEL); ++ if (!mutex) ++ return -ENOMEM; ++ ++ winesync_init_obj(mutex); ++ mutex->type = WINESYNC_TYPE_MUTEX; ++ mutex->u.mutex.count = args.count; ++ mutex->u.mutex.owner = args.owner; ++ ++ mutex_lock(&dev->table_lock); ++ ret = idr_alloc(&dev->objects, mutex, 0, 0, GFP_KERNEL); ++ mutex_unlock(&dev->table_lock); ++ ++ if (ret < 0) { ++ kfree(mutex); ++ return ret; ++ } ++ ++ return put_user(ret, &user_args->mutex); ++} ++ ++static int winesync_delete(struct winesync_device *dev, void __user *argp) ++{ ++ struct winesync_obj *obj; ++ __s32 id; ++ ++ if (get_user(id, (__s32 __user *)argp)) ++ return -EFAULT; ++ ++ mutex_lock(&dev->table_lock); ++ obj = idr_remove(&dev->objects, id); ++ mutex_unlock(&dev->table_lock); ++ ++ if (!obj) ++ return -EINVAL; ++ ++ put_obj(obj); ++ return 0; ++} ++ ++static int winesync_get_sem(struct winesync_device *dev, void __user *argp) ++{ ++ struct winesync_obj *sem; ++ int ret = -EWOULDBLOCK; ++ __s32 id; ++ ++ if (get_user(id, (__s32 __user *)argp)) ++ return -EFAULT; ++ ++ sem = get_obj(dev, id); ++ if (!sem) ++ return -EINVAL; ++ if (sem->type != WINESYNC_TYPE_SEM) { ++ put_obj(sem); ++ return -EINVAL; ++ } ++ ++ spin_lock(&sem->lock); ++ ++ if (sem->u.sem.count) { ++ /* ++ * Decrement the semaphore's count, regardless of whether it ++ * has the WINESYNC_SEM_GETONWAIT flag set. ++ */ ++ sem->u.sem.count--; ++ ret = 0; ++ } ++ ++ spin_unlock(&sem->lock); ++ ++ put_obj(sem); ++ ++ return ret; ++} ++ ++/* ++ * Actually change the semaphore state, returning -EOVERFLOW if it is made ++ * invalid. ++ */ ++static int put_sem_state(struct winesync_obj *sem, __u32 count) ++{ ++ lockdep_assert_held(&sem->lock); ++ ++ if (sem->u.sem.count + count < sem->u.sem.count || ++ sem->u.sem.count + count > sem->u.sem.max) ++ return -EOVERFLOW; ++ ++ sem->u.sem.count += count; ++ return 0; ++} ++ ++static int winesync_put_sem(struct winesync_device *dev, void __user *argp, ++ bool pulse) ++{ ++ struct winesync_sem_args __user *user_args = argp; ++ struct winesync_sem_args args; ++ struct winesync_obj *sem; ++ __u32 prev_count; ++ int ret; ++ ++ if (copy_from_user(&args, argp, sizeof(args))) ++ return -EFAULT; ++ ++ sem = get_obj(dev, args.sem); ++ if (!sem) ++ return -EINVAL; ++ if (sem->type != WINESYNC_TYPE_SEM) { ++ put_obj(sem); ++ return -EINVAL; ++ } ++ ++ if (refcount_read(&sem->all_hint) > 1) { ++ spin_lock(&dev->wait_all_lock); ++ spin_lock(&sem->lock); ++ ++ prev_count = sem->u.sem.count; ++ ret = put_sem_state(sem, args.count); ++ if (!ret) { ++ try_wake_all_obj(dev, sem); ++ try_wake_any_sem(sem); ++ } ++ ++ if (pulse) ++ sem->u.sem.count = 0; ++ ++ spin_unlock(&sem->lock); ++ spin_unlock(&dev->wait_all_lock); ++ } else { ++ spin_lock(&sem->lock); ++ ++ prev_count = sem->u.sem.count; ++ ret = put_sem_state(sem, args.count); ++ if (!ret) ++ try_wake_any_sem(sem); ++ ++ if (pulse) ++ sem->u.sem.count = 0; ++ ++ spin_unlock(&sem->lock); ++ } ++ ++ put_obj(sem); ++ ++ if (!ret && put_user(prev_count, &user_args->count)) ++ ret = -EFAULT; ++ ++ return ret; ++} ++ ++/* ++ * Actually change the mutex state, returning -EPERM if not the owner. 
++ */ ++static int put_mutex_state(struct winesync_obj *mutex, ++ const struct winesync_mutex_args *args) ++{ ++ lockdep_assert_held(&mutex->lock); ++ ++ if (mutex->u.mutex.owner != args->owner) ++ return -EPERM; ++ ++ if (!--mutex->u.mutex.count) ++ mutex->u.mutex.owner = 0; ++ return 0; ++} ++ ++static int winesync_put_mutex(struct winesync_device *dev, void __user *argp) ++{ ++ struct winesync_mutex_args __user *user_args = argp; ++ struct winesync_mutex_args args; ++ struct winesync_obj *mutex; ++ __u32 prev_count; ++ int ret; ++ ++ if (copy_from_user(&args, argp, sizeof(args))) ++ return -EFAULT; ++ if (!args.owner) ++ return -EINVAL; ++ ++ mutex = get_obj(dev, args.mutex); ++ if (!mutex) ++ return -EINVAL; ++ if (mutex->type != WINESYNC_TYPE_MUTEX) { ++ put_obj(mutex); ++ return -EINVAL; ++ } ++ ++ if (refcount_read(&mutex->all_hint) > 1) { ++ spin_lock(&dev->wait_all_lock); ++ spin_lock(&mutex->lock); ++ ++ prev_count = mutex->u.mutex.count; ++ ret = put_mutex_state(mutex, &args); ++ if (!ret) { ++ try_wake_all_obj(dev, mutex); ++ try_wake_any_mutex(mutex); ++ } ++ ++ spin_unlock(&mutex->lock); ++ spin_unlock(&dev->wait_all_lock); ++ } else { ++ spin_lock(&mutex->lock); ++ ++ prev_count = mutex->u.mutex.count; ++ ret = put_mutex_state(mutex, &args); ++ if (!ret) ++ try_wake_any_mutex(mutex); ++ ++ spin_unlock(&mutex->lock); ++ } ++ ++ put_obj(mutex); ++ ++ if (!ret && put_user(prev_count, &user_args->count)) ++ ret = -EFAULT; ++ ++ return ret; ++} ++ ++static int winesync_read_sem(struct winesync_device *dev, void __user *argp) ++{ ++ struct winesync_sem_args __user *user_args = argp; ++ struct winesync_sem_args args; ++ struct winesync_obj *sem; ++ __s32 id; ++ ++ if (get_user(id, &user_args->sem)) ++ return -EFAULT; ++ ++ sem = get_obj(dev, id); ++ if (!sem) ++ return -EINVAL; ++ if (sem->type != WINESYNC_TYPE_SEM) { ++ put_obj(sem); ++ return -EINVAL; ++ } ++ ++ args.sem = id; ++ spin_lock(&sem->lock); ++ args.count = sem->u.sem.count; ++ args.max = sem->u.sem.max; ++ args.flags = sem->u.sem.flags; ++ spin_unlock(&sem->lock); ++ ++ put_obj(sem); ++ ++ if (copy_to_user(user_args, &args, sizeof(args))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int winesync_read_mutex(struct winesync_device *dev, void __user *argp) ++{ ++ struct winesync_mutex_args __user *user_args = argp; ++ struct winesync_mutex_args args; ++ struct winesync_obj *mutex; ++ __s32 id; ++ int ret; ++ ++ if (get_user(id, &user_args->mutex)) ++ return -EFAULT; ++ ++ mutex = get_obj(dev, id); ++ if (!mutex) ++ return -EINVAL; ++ if (mutex->type != WINESYNC_TYPE_MUTEX) { ++ put_obj(mutex); ++ return -EINVAL; ++ } ++ ++ args.mutex = id; ++ spin_lock(&mutex->lock); ++ args.count = mutex->u.mutex.count; ++ args.owner = mutex->u.mutex.owner; ++ ret = mutex->u.mutex.ownerdead ? -EOWNERDEAD : 0; ++ spin_unlock(&mutex->lock); ++ ++ put_obj(mutex); ++ ++ if (copy_to_user(user_args, &args, sizeof(args))) ++ return -EFAULT; ++ return ret; ++} ++ ++/* ++ * Actually change the mutex state to mark its owner as dead. 
++ */ ++static void put_mutex_ownerdead_state(struct winesync_obj *mutex) ++{ ++ lockdep_assert_held(&mutex->lock); ++ ++ mutex->u.mutex.ownerdead = true; ++ mutex->u.mutex.owner = 0; ++ mutex->u.mutex.count = 0; ++} ++ ++static int winesync_kill_owner(struct winesync_device *dev, void __user *argp) ++{ ++ struct winesync_obj *obj; ++ __u32 owner; ++ int id; ++ ++ if (get_user(owner, (__u32 __user *)argp)) ++ return -EFAULT; ++ if (!owner) ++ return -EINVAL; ++ ++ rcu_read_lock(); ++ ++ idr_for_each_entry(&dev->objects, obj, id) { ++ if (!kref_get_unless_zero(&obj->refcount)) ++ continue; ++ ++ if (obj->type != WINESYNC_TYPE_MUTEX) { ++ put_obj(obj); ++ continue; ++ } ++ ++ if (refcount_read(&obj->all_hint) > 1) { ++ spin_lock(&dev->wait_all_lock); ++ spin_lock(&obj->lock); ++ ++ if (obj->u.mutex.owner == owner) { ++ put_mutex_ownerdead_state(obj); ++ try_wake_all_obj(dev, obj); ++ try_wake_any_mutex(obj); ++ } ++ ++ spin_unlock(&obj->lock); ++ spin_unlock(&dev->wait_all_lock); ++ } else { ++ spin_lock(&obj->lock); ++ ++ if (obj->u.mutex.owner == owner) { ++ put_mutex_ownerdead_state(obj); ++ try_wake_any_mutex(obj); ++ } ++ ++ spin_unlock(&obj->lock); ++ } ++ ++ put_obj(obj); ++ } ++ ++ rcu_read_unlock(); ++ ++ return 0; ++} ++ ++static int winesync_schedule(const struct winesync_q *q, ktime_t *timeout) ++{ ++ int ret = 0; ++ ++ do { ++ if (signal_pending(current)) { ++ ret = -ERESTARTSYS; ++ break; ++ } ++ ++ set_current_state(TASK_INTERRUPTIBLE); ++ if (atomic_read(&q->signaled) != -1) { ++ ret = 0; ++ break; ++ } ++ ret = schedule_hrtimeout(timeout, HRTIMER_MODE_ABS); ++ } while (ret < 0); ++ __set_current_state(TASK_RUNNING); ++ ++ return ret; ++} ++ ++/* ++ * Allocate and initialize most of the winesync_q structure, but do not queue us ++ * yet. Also, calculate the relative timeout in jiffies. ++ */ ++static int setup_wait(struct winesync_device *dev, ++ const struct winesync_wait_args *args, bool all, ++ ktime_t *ret_timeout, struct winesync_q **ret_q) ++{ ++ const __u32 count = args->count; ++ struct winesync_q *q; ++ ktime_t timeout = 0; ++ __s32 *ids; ++ __u32 i, j; ++ ++ if (args->timeout) { ++ struct timespec64 to; ++ ++ if (get_timespec64(&to, u64_to_user_ptr(args->timeout))) ++ return -EFAULT; ++ if (!timespec64_valid(&to)) ++ return -EINVAL; ++ ++ timeout = timespec64_to_ns(&to); ++ } ++ ++ ids = kmalloc_array(args->count, sizeof(*ids), GFP_KERNEL); ++ if (!ids) ++ return -ENOMEM; ++ if (copy_from_user(ids, u64_to_user_ptr(args->objs), ++ array_size(args->count, sizeof(*ids)))) { ++ kfree(ids); ++ return -EFAULT; ++ } ++ ++ q = kmalloc(struct_size(q, entries, count), GFP_KERNEL); ++ if (!q) { ++ kfree(ids); ++ return -ENOMEM; ++ } ++ q->task = current; ++ q->owner = args->owner; ++ atomic_set(&q->signaled, -1); ++ q->all = all; ++ q->ownerdead = false; ++ q->count = count; ++ ++ for (i = 0; i < count; i++) { ++ struct winesync_q_entry *entry = &q->entries[i]; ++ struct winesync_obj *obj = get_obj(dev, ids[i]); ++ ++ if (!obj) ++ goto err; ++ ++ if (all) { ++ /* Check that the objects are all distinct. 
*/ ++ for (j = 0; j < i; j++) { ++ if (obj == q->entries[j].obj) { ++ put_obj(obj); ++ goto err; ++ } ++ } ++ } ++ ++ entry->obj = obj; ++ entry->q = q; ++ entry->index = i; ++ } ++ ++ kfree(ids); ++ ++ *ret_q = q; ++ *ret_timeout = timeout; ++ return 0; ++ ++err: ++ for (j = 0; j < i; j++) ++ put_obj(q->entries[j].obj); ++ kfree(ids); ++ kfree(q); ++ return -EINVAL; ++} ++ ++static void try_wake_any_obj(struct winesync_obj *obj) ++{ ++ switch (obj->type) { ++ case WINESYNC_TYPE_SEM: ++ try_wake_any_sem(obj); ++ break; ++ case WINESYNC_TYPE_MUTEX: ++ try_wake_any_mutex(obj); ++ break; ++ } ++} ++ ++static int winesync_wait_any(struct winesync_device *dev, void __user *argp) ++{ ++ struct winesync_wait_args args; ++ struct winesync_q *q; ++ ktime_t timeout; ++ __u32 i; ++ int ret; ++ ++ if (copy_from_user(&args, argp, sizeof(args))) ++ return -EFAULT; ++ if (!args.owner) ++ return -EINVAL; ++ ++ ret = setup_wait(dev, &args, false, &timeout, &q); ++ if (ret < 0) ++ return ret; ++ ++ /* queue ourselves */ ++ ++ for (i = 0; i < args.count; i++) { ++ struct winesync_q_entry *entry = &q->entries[i]; ++ struct winesync_obj *obj = q->entries[i].obj; ++ ++ spin_lock(&obj->lock); ++ list_add_tail(&entry->node, &obj->any_waiters); ++ spin_unlock(&obj->lock); ++ } ++ ++ /* check if we are already signaled */ ++ ++ for (i = 0; i < args.count; i++) { ++ struct winesync_obj *obj = q->entries[i].obj; ++ ++ if (atomic_read(&q->signaled) != -1) ++ break; ++ ++ spin_lock(&obj->lock); ++ try_wake_any_obj(obj); ++ spin_unlock(&obj->lock); ++ } ++ ++ /* sleep */ ++ ++ ret = winesync_schedule(q, args.timeout ? &timeout : NULL); ++ ++ /* and finally, unqueue */ ++ ++ for (i = 0; i < args.count; i++) { ++ struct winesync_obj *obj = q->entries[i].obj; ++ ++ spin_lock(&obj->lock); ++ list_del(&q->entries[i].node); ++ spin_unlock(&obj->lock); ++ ++ put_obj(obj); ++ } ++ ++ if (atomic_read(&q->signaled) != -1) { ++ struct winesync_wait_args __user *user_args = argp; ++ ++ /* even if we caught a signal, we need to communicate success */ ++ ret = q->ownerdead ? -EOWNERDEAD : 0; ++ ++ if (put_user(atomic_read(&q->signaled), &user_args->index)) ++ ret = -EFAULT; ++ } else if (!ret) { ++ ret = -ETIMEDOUT; ++ } ++ ++ kfree(q); ++ return ret; ++} ++ ++static int winesync_wait_all(struct winesync_device *dev, void __user *argp) ++{ ++ struct winesync_wait_args args; ++ struct winesync_q *q; ++ ktime_t timeout; ++ __u32 i; ++ int ret; ++ ++ if (copy_from_user(&args, argp, sizeof(args))) ++ return -EFAULT; ++ if (!args.owner) ++ return -EINVAL; ++ ++ ret = setup_wait(dev, &args, true, &timeout, &q); ++ if (ret < 0) ++ return ret; ++ ++ /* queue ourselves */ ++ ++ spin_lock(&dev->wait_all_lock); ++ ++ for (i = 0; i < args.count; i++) { ++ struct winesync_q_entry *entry = &q->entries[i]; ++ struct winesync_obj *obj = q->entries[i].obj; ++ ++ refcount_inc(&obj->all_hint); ++ ++ /* ++ * obj->all_waiters is protected by dev->wait_all_lock rather ++ * than obj->lock, so there is no need to acquire it here. ++ */ ++ list_add_tail(&entry->node, &obj->all_waiters); ++ } ++ ++ /* check if we are already signaled */ ++ ++ try_wake_all(dev, q, NULL); ++ ++ spin_unlock(&dev->wait_all_lock); ++ ++ /* sleep */ ++ ++ ret = winesync_schedule(q, args.timeout ? 
&timeout : NULL); ++ ++ /* and finally, unqueue */ ++ ++ spin_lock(&dev->wait_all_lock); ++ ++ for (i = 0; i < args.count; i++) { ++ struct winesync_q_entry *entry = &q->entries[i]; ++ struct winesync_obj *obj = q->entries[i].obj; ++ ++ /* ++ * obj->all_waiters is protected by dev->wait_all_lock rather ++ * than obj->lock, so there is no need to acquire it here. ++ */ ++ list_del(&entry->node); ++ ++ refcount_dec(&obj->all_hint); ++ ++ put_obj(obj); ++ } ++ ++ spin_unlock(&dev->wait_all_lock); ++ ++ if (atomic_read(&q->signaled) != -1) { ++ /* even if we caught a signal, we need to communicate success */ ++ ret = q->ownerdead ? -EOWNERDEAD : 0; ++ } else if (!ret) { ++ ret = -ETIMEDOUT; ++ } ++ ++ kfree(q); ++ return ret; ++} ++ ++static long winesync_char_ioctl(struct file *file, unsigned int cmd, ++ unsigned long parm) ++{ ++ struct winesync_device *dev = file->private_data; ++ void __user *argp = (void __user *)parm; ++ ++ switch (cmd) { ++ case WINESYNC_IOC_CREATE_SEM: ++ return winesync_create_sem(dev, argp); ++ case WINESYNC_IOC_CREATE_MUTEX: ++ return winesync_create_mutex(dev, argp); ++ case WINESYNC_IOC_DELETE: ++ return winesync_delete(dev, argp); ++ case WINESYNC_IOC_GET_SEM: ++ return winesync_get_sem(dev, argp); ++ case WINESYNC_IOC_PUT_SEM: ++ return winesync_put_sem(dev, argp, false); ++ case WINESYNC_IOC_PULSE_SEM: ++ return winesync_put_sem(dev, argp, true); ++ case WINESYNC_IOC_PUT_MUTEX: ++ return winesync_put_mutex(dev, argp); ++ case WINESYNC_IOC_READ_SEM: ++ return winesync_read_sem(dev, argp); ++ case WINESYNC_IOC_READ_MUTEX: ++ return winesync_read_mutex(dev, argp); ++ case WINESYNC_IOC_KILL_OWNER: ++ return winesync_kill_owner(dev, argp); ++ case WINESYNC_IOC_WAIT_ANY: ++ return winesync_wait_any(dev, argp); ++ case WINESYNC_IOC_WAIT_ALL: ++ return winesync_wait_all(dev, argp); ++ default: ++ return -ENOSYS; ++ } ++} ++ ++static const struct file_operations winesync_fops = { ++ .owner = THIS_MODULE, ++ .open = winesync_char_open, ++ .release = winesync_char_release, ++ .unlocked_ioctl = winesync_char_ioctl, ++ .compat_ioctl = winesync_char_ioctl, ++ .llseek = no_llseek, ++}; ++ ++static struct miscdevice winesync_misc = { ++ .minor = WINESYNC_MINOR, ++ .name = WINESYNC_NAME, ++ .fops = &winesync_fops, ++}; ++ ++static int __init winesync_init(void) ++{ ++ return misc_register(&winesync_misc); ++} ++ ++static void __exit winesync_exit(void) ++{ ++ misc_deregister(&winesync_misc); ++} ++ ++module_init(winesync_init); ++module_exit(winesync_exit); ++ ++MODULE_AUTHOR("Zebediah Figura"); ++MODULE_DESCRIPTION("Kernel driver for Wine synchronization primitives"); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS("devname:" WINESYNC_NAME); ++MODULE_ALIAS_MISCDEV(WINESYNC_MINOR); +diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h +index 0676f18093f9..350aecfcfb29 100644 +--- a/include/linux/miscdevice.h ++++ b/include/linux/miscdevice.h +@@ -71,6 +71,7 @@ + #define USERIO_MINOR 240 + #define VHOST_VSOCK_MINOR 241 + #define RFKILL_MINOR 242 ++#define WINESYNC_MINOR 243 + #define MISC_DYNAMIC_MINOR 255 + + struct device; +diff --git a/include/uapi/linux/winesync.h b/include/uapi/linux/winesync.h +new file mode 100644 +index 000000000000..efc591795249 +--- /dev/null ++++ b/include/uapi/linux/winesync.h +@@ -0,0 +1,61 @@ ++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ ++/* ++ * Kernel support for Wine synchronization primitives ++ * ++ * Copyright (C) 2021 Zebediah Figura ++ */ ++ ++#ifndef __LINUX_WINESYNC_H ++#define __LINUX_WINESYNC_H ++ ++#include 
++
++#define WINESYNC_SEM_GETONWAIT 1
++
++struct winesync_sem_args {
++ __s32 sem;
++ __u32 count;
++ __u32 max;
++ __u32 flags;
++};
++
++struct winesync_mutex_args {
++ __s32 mutex;
++ __u32 owner;
++ __u32 count;
++};
++
++struct winesync_wait_args {
++ __u64 timeout;
++ __u64 objs;
++ __u32 count;
++ __u32 owner;
++ __u32 index;
++ __u32 pad;
++};
++
++#define WINESYNC_IOC_BASE 0xf7
++
++#define WINESYNC_IOC_CREATE_SEM _IOWR(WINESYNC_IOC_BASE, 0, \
++ struct winesync_sem_args)
++#define WINESYNC_IOC_DELETE _IOW (WINESYNC_IOC_BASE, 1, __s32)
++#define WINESYNC_IOC_PUT_SEM _IOWR(WINESYNC_IOC_BASE, 2, \
++ struct winesync_sem_args)
++#define WINESYNC_IOC_WAIT_ANY _IOWR(WINESYNC_IOC_BASE, 3, \
++ struct winesync_wait_args)
++#define WINESYNC_IOC_WAIT_ALL _IOW (WINESYNC_IOC_BASE, 4, \
++ struct winesync_wait_args)
++#define WINESYNC_IOC_CREATE_MUTEX _IOWR(WINESYNC_IOC_BASE, 5, \
++ struct winesync_mutex_args)
++#define WINESYNC_IOC_PUT_MUTEX _IOWR(WINESYNC_IOC_BASE, 6, \
++ struct winesync_mutex_args)
++#define WINESYNC_IOC_KILL_OWNER _IOW (WINESYNC_IOC_BASE, 7, __u32)
++#define WINESYNC_IOC_READ_SEM _IOWR(WINESYNC_IOC_BASE, 8, \
++ struct winesync_sem_args)
++#define WINESYNC_IOC_READ_MUTEX _IOWR(WINESYNC_IOC_BASE, 9, \
++ struct winesync_mutex_args)
++#define WINESYNC_IOC_GET_SEM _IOW (WINESYNC_IOC_BASE, 10, __s32)
++#define WINESYNC_IOC_PULSE_SEM _IOWR(WINESYNC_IOC_BASE, 11, \
++ struct winesync_sem_args)
++
++#endif
+diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
+index 6c575cf34a71..7f43a50ee19e 100644
+--- a/tools/testing/selftests/Makefile
++++ b/tools/testing/selftests/Makefile
+@@ -9,6 +9,7 @@ TARGETS += core
+ TARGETS += cpufreq
+ TARGETS += cpu-hotplug
+ TARGETS += drivers/dma-buf
++TARGETS += drivers/winesync
+ TARGETS += efivarfs
+ TARGETS += exec
+ TARGETS += filesystems
+diff --git a/tools/testing/selftests/drivers/winesync/Makefile b/tools/testing/selftests/drivers/winesync/Makefile
+new file mode 100644
+index 000000000000..43b39fdeea10
+--- /dev/null
++++ b/tools/testing/selftests/drivers/winesync/Makefile
+@@ -0,0 +1,8 @@
++# SPDX-LICENSE-IDENTIFIER: GPL-2.0-only
++TEST_GEN_PROGS := winesync
++
++top_srcdir =../../../../..
++CFLAGS += -I$(top_srcdir)/usr/include
++LDLIBS += -lpthread
++
++include ../../lib.mk
+diff --git a/tools/testing/selftests/drivers/winesync/config b/tools/testing/selftests/drivers/winesync/config
+new file mode 100644
+index 000000000000..60539c826d06
+--- /dev/null
++++ b/tools/testing/selftests/drivers/winesync/config
+@@ -0,0 +1 @@
++CONFIG_WINESYNC=y
+diff --git a/tools/testing/selftests/drivers/winesync/winesync.c b/tools/testing/selftests/drivers/winesync/winesync.c
+new file mode 100644
+index 000000000000..52373fcd5c8c
+--- /dev/null
++++ b/tools/testing/selftests/drivers/winesync/winesync.c
+@@ -0,0 +1,1486 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Various unit tests for the "winesync" synchronization primitive driver.
++ * ++ * Copyright (C) 2021 Zebediah Figura ++ */ ++ ++#define _GNU_SOURCE ++#include ++#include ++#include ++#include ++#include ++#include ++#include "../../kselftest_harness.h" ++ ++TEST(semaphore_state) ++{ ++ struct winesync_wait_args wait_args; ++ struct winesync_sem_args sem_args; ++ struct timespec timeout; ++ int fd, ret; ++ ++ clock_gettime(CLOCK_MONOTONIC, &timeout); ++ ++ fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY); ++ ASSERT_LE(0, fd); ++ ++ sem_args.count = 3; ++ sem_args.max = 2; ++ sem_args.sem = 0xdeadbeef; ++ sem_args.flags = 0; ++ ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ sem_args.count = 2; ++ sem_args.max = 2; ++ sem_args.sem = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_NE(0xdeadbeef, sem_args.sem); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ sem_args.flags = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ EXPECT_EQ(0, sem_args.flags); ++ ++ sem_args.count = 0; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, sem_args.count); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ sem_args.count = 1; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EOVERFLOW, errno); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ wait_args.timeout = (uintptr_t)&timeout; ++ wait_args.objs = (uintptr_t)&sem_args.sem; ++ wait_args.count = 1; ++ wait_args.owner = 123; ++ wait_args.index = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, wait_args.index); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ ret = ioctl(fd, WINESYNC_IOC_GET_SEM, &sem_args.sem); ++ EXPECT_EQ(0, ret); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ ret = ioctl(fd, WINESYNC_IOC_GET_SEM, &sem_args.sem); ++ EXPECT_EQ(0, ret); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ ret = ioctl(fd, WINESYNC_IOC_GET_SEM, &sem_args.sem); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EWOULDBLOCK, errno); ++ ++ sem_args.count = 3; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EOVERFLOW, errno); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ sem_args.count = 2; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, 
WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ ret = ioctl(fd, WINESYNC_IOC_GET_SEM, &sem_args.sem); ++ EXPECT_EQ(0, ret); ++ ret = ioctl(fd, WINESYNC_IOC_GET_SEM, &sem_args.sem); ++ EXPECT_EQ(0, ret); ++ ++ sem_args.count = 1; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ /* Test PULSE. */ ++ ++ sem_args.count = 2; ++ ret = ioctl(fd, WINESYNC_IOC_PULSE_SEM, &sem_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EOVERFLOW, errno); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ sem_args.count = 1; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ sem_args.count = 1; ++ ret = ioctl(fd, WINESYNC_IOC_PULSE_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, sem_args.count); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ sem_args.count = 1; ++ ret = ioctl(fd, WINESYNC_IOC_PULSE_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ sem_args.count = 2; ++ ret = ioctl(fd, WINESYNC_IOC_PULSE_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ ret = ioctl(fd, WINESYNC_IOC_DELETE, &sem_args.sem); ++ EXPECT_EQ(0, ret); ++ ++ close(fd); ++} ++ ++TEST(semaphore_state_getonwait) ++{ ++ struct winesync_wait_args wait_args; ++ struct winesync_sem_args sem_args; ++ struct timespec timeout; ++ int fd, ret; ++ ++ clock_gettime(CLOCK_MONOTONIC, &timeout); ++ ++ fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY); ++ ASSERT_LE(0, fd); ++ ++ sem_args.count = 3; ++ sem_args.max = 2; ++ sem_args.sem = 0xdeadbeef; ++ sem_args.flags = WINESYNC_SEM_GETONWAIT; ++ ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ sem_args.count = 2; ++ sem_args.max = 2; ++ sem_args.sem = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_NE(0xdeadbeef, sem_args.sem); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ sem_args.count = 0; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, sem_args.count); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ sem_args.count = 1; ++ ret = ioctl(fd, 
WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EOVERFLOW, errno); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ wait_args.timeout = (uintptr_t)&timeout; ++ wait_args.objs = (uintptr_t)&sem_args.sem; ++ wait_args.count = 1; ++ wait_args.owner = 123; ++ wait_args.index = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, wait_args.index); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ wait_args.index = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, wait_args.index); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(ETIMEDOUT, errno); ++ ++ sem_args.count = 3; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EOVERFLOW, errno); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ sem_args.count = 2; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(0, ret); ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(0, ret); ++ ++ sem_args.count = 1; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ /* Test GET. */ ++ ++ ret = ioctl(fd, WINESYNC_IOC_GET_SEM, &sem_args.sem); ++ EXPECT_EQ(0, ret); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ ret = ioctl(fd, WINESYNC_IOC_GET_SEM, &sem_args.sem); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EWOULDBLOCK, errno); ++ ++ sem_args.count = 2; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ ret = ioctl(fd, WINESYNC_IOC_GET_SEM, &sem_args.sem); ++ EXPECT_EQ(0, ret); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ /* Test PULSE. 
*/ ++ ++ sem_args.count = 2; ++ ret = ioctl(fd, WINESYNC_IOC_PULSE_SEM, &sem_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EOVERFLOW, errno); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ sem_args.count = 1; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ sem_args.count = 1; ++ ret = ioctl(fd, WINESYNC_IOC_PULSE_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, sem_args.count); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ sem_args.count = 1; ++ ret = ioctl(fd, WINESYNC_IOC_PULSE_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ sem_args.count = 2; ++ ret = ioctl(fd, WINESYNC_IOC_PULSE_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ EXPECT_EQ(2, sem_args.max); ++ ++ ret = ioctl(fd, WINESYNC_IOC_DELETE, &sem_args.sem); ++ EXPECT_EQ(0, ret); ++ ++ close(fd); ++} ++ ++TEST(mutex_state) ++{ ++ struct winesync_wait_args wait_args; ++ struct winesync_mutex_args mutex_args; ++ struct timespec timeout; ++ __u32 owner; ++ int fd, ret; ++ ++ clock_gettime(CLOCK_MONOTONIC, &timeout); ++ ++ fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY); ++ ASSERT_LE(0, fd); ++ ++ mutex_args.owner = 123; ++ mutex_args.count = 0; ++ ret = ioctl(fd, WINESYNC_IOC_CREATE_MUTEX, &mutex_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ mutex_args.owner = 0; ++ mutex_args.count = 2; ++ ret = ioctl(fd, WINESYNC_IOC_CREATE_MUTEX, &mutex_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ mutex_args.owner = 123; ++ mutex_args.count = 2; ++ mutex_args.mutex = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_CREATE_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_NE(0xdeadbeef, mutex_args.mutex); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, mutex_args.count); ++ EXPECT_EQ(123, mutex_args.owner); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_MUTEX, &mutex_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 456; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_MUTEX, &mutex_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EPERM, errno); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, mutex_args.count); ++ EXPECT_EQ(123, mutex_args.owner); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 123; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, mutex_args.count); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ 
EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, mutex_args.count); ++ EXPECT_EQ(123, mutex_args.owner); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 123; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, mutex_args.count); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, mutex_args.count); ++ EXPECT_EQ(0, mutex_args.owner); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 123; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_MUTEX, &mutex_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EPERM, errno); ++ ++ wait_args.timeout = (uintptr_t)&timeout; ++ wait_args.objs = (uintptr_t)&mutex_args.mutex; ++ wait_args.count = 1; ++ wait_args.owner = 456; ++ wait_args.index = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, wait_args.index); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, mutex_args.count); ++ EXPECT_EQ(456, mutex_args.owner); ++ ++ wait_args.owner = 456; ++ wait_args.index = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, wait_args.index); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, mutex_args.count); ++ EXPECT_EQ(456, mutex_args.owner); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 456; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, mutex_args.count); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, mutex_args.count); ++ EXPECT_EQ(456, mutex_args.owner); ++ ++ wait_args.owner = 123; ++ wait_args.index = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(ETIMEDOUT, errno); ++ ++ owner = 0; ++ ret = ioctl(fd, WINESYNC_IOC_KILL_OWNER, &owner); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ owner = 123; ++ ret = ioctl(fd, WINESYNC_IOC_KILL_OWNER, &owner); ++ EXPECT_EQ(0, ret); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, mutex_args.count); ++ EXPECT_EQ(456, mutex_args.owner); ++ ++ owner = 456; ++ ret = ioctl(fd, WINESYNC_IOC_KILL_OWNER, &owner); ++ EXPECT_EQ(0, ret); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EOWNERDEAD, errno); ++ EXPECT_EQ(0, mutex_args.count); ++ EXPECT_EQ(0, mutex_args.owner); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EOWNERDEAD, errno); ++ EXPECT_EQ(0, mutex_args.count); ++ EXPECT_EQ(0, mutex_args.owner); ++ ++ wait_args.owner = 123; ++ wait_args.index = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EOWNERDEAD, errno); ++ EXPECT_EQ(0, wait_args.index); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ 
EXPECT_EQ(1, mutex_args.count); ++ EXPECT_EQ(123, mutex_args.owner); ++ ++ owner = 123; ++ ret = ioctl(fd, WINESYNC_IOC_KILL_OWNER, &owner); ++ EXPECT_EQ(0, ret); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EOWNERDEAD, errno); ++ EXPECT_EQ(0, mutex_args.count); ++ EXPECT_EQ(0, mutex_args.owner); ++ ++ wait_args.owner = 123; ++ wait_args.index = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EOWNERDEAD, errno); ++ EXPECT_EQ(0, wait_args.index); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, mutex_args.count); ++ EXPECT_EQ(123, mutex_args.owner); ++ ++ ret = ioctl(fd, WINESYNC_IOC_DELETE, &mutex_args.mutex); ++ EXPECT_EQ(0, ret); ++ ++ mutex_args.owner = 0; ++ mutex_args.count = 0; ++ mutex_args.mutex = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_CREATE_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_NE(0xdeadbeef, mutex_args.mutex); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, mutex_args.count); ++ EXPECT_EQ(0, mutex_args.owner); ++ ++ wait_args.owner = 123; ++ wait_args.index = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, wait_args.index); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, mutex_args.count); ++ EXPECT_EQ(123, mutex_args.owner); ++ ++ ret = ioctl(fd, WINESYNC_IOC_DELETE, &mutex_args.mutex); ++ EXPECT_EQ(0, ret); ++ ++ close(fd); ++} ++ ++TEST(wait_any) ++{ ++ struct winesync_mutex_args mutex_args = {0}; ++ struct winesync_wait_args wait_args = {0}; ++ struct winesync_sem_args sem_args = {0}; ++ struct timespec timeout; ++ __s32 objs[2]; ++ __u32 owner; ++ int fd, ret; ++ ++ clock_gettime(CLOCK_MONOTONIC, &timeout); ++ ++ fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY); ++ ASSERT_LE(0, fd); ++ ++ sem_args.count = 2; ++ sem_args.max = 3; ++ sem_args.sem = 0xdeadbeef; ++ sem_args.flags = WINESYNC_SEM_GETONWAIT; ++ ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_NE(0xdeadbeef, sem_args.sem); ++ ++ mutex_args.owner = 0; ++ mutex_args.count = 0; ++ mutex_args.mutex = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_CREATE_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_NE(0xdeadbeef, mutex_args.mutex); ++ ++ objs[0] = sem_args.sem; ++ objs[1] = mutex_args.mutex; ++ ++ wait_args.timeout = (uintptr_t)&timeout; ++ wait_args.objs = (uintptr_t)objs; ++ wait_args.count = 2; ++ wait_args.owner = 123; ++ wait_args.index = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, wait_args.index); ++ EXPECT_EQ((uintptr_t)objs, wait_args.objs); ++ EXPECT_EQ(2, wait_args.count); ++ EXPECT_EQ(123, wait_args.owner); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, sem_args.count); ++ EXPECT_EQ(3, sem_args.max); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, mutex_args.count); ++ EXPECT_EQ(0, 
mutex_args.owner); ++ ++ wait_args.owner = 123; ++ wait_args.index = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, wait_args.index); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ EXPECT_EQ(3, sem_args.max); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, mutex_args.count); ++ EXPECT_EQ(0, mutex_args.owner); ++ ++ wait_args.owner = 123; ++ wait_args.index = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, wait_args.index); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ EXPECT_EQ(3, sem_args.max); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, mutex_args.count); ++ EXPECT_EQ(123, mutex_args.owner); ++ ++ sem_args.count = 1; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ wait_args.owner = 123; ++ wait_args.index = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, wait_args.index); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ EXPECT_EQ(3, sem_args.max); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, mutex_args.count); ++ EXPECT_EQ(123, mutex_args.owner); ++ ++ wait_args.owner = 123; ++ wait_args.index = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, wait_args.index); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ EXPECT_EQ(3, sem_args.max); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, mutex_args.count); ++ EXPECT_EQ(123, mutex_args.owner); ++ ++ wait_args.owner = 456; ++ wait_args.index = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(ETIMEDOUT, errno); ++ ++ owner = 123; ++ ret = ioctl(fd, WINESYNC_IOC_KILL_OWNER, &owner); ++ EXPECT_EQ(0, ret); ++ ++ wait_args.owner = 456; ++ wait_args.index = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EOWNERDEAD, errno); ++ EXPECT_EQ(1, wait_args.index); ++ ++ wait_args.owner = 456; ++ wait_args.index = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, wait_args.index); ++ ++ /* test waiting on the same object twice */ ++ sem_args.count = 2; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ objs[0] = objs[1] = sem_args.sem; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, wait_args.index); ++ ++ sem_args.count 
= 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, sem_args.count); ++ EXPECT_EQ(3, sem_args.max); ++ ++ wait_args.count = 0; ++ wait_args.objs = (uintptr_t)NULL; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(ETIMEDOUT, errno); ++ ++ ret = ioctl(fd, WINESYNC_IOC_DELETE, &sem_args.sem); ++ EXPECT_EQ(0, ret); ++ ret = ioctl(fd, WINESYNC_IOC_DELETE, &mutex_args.mutex); ++ EXPECT_EQ(0, ret); ++ ++ close(fd); ++} ++ ++TEST(wait_all) ++{ ++ struct winesync_mutex_args mutex_args = {0}; ++ struct winesync_wait_args wait_args = {0}; ++ struct winesync_sem_args sem_args = {0}; ++ struct timespec timeout; ++ __s32 objs[2]; ++ __u32 owner; ++ int fd, ret; ++ ++ clock_gettime(CLOCK_MONOTONIC, &timeout); ++ ++ fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY); ++ ASSERT_LE(0, fd); ++ ++ sem_args.count = 2; ++ sem_args.max = 3; ++ sem_args.sem = 0xdeadbeef; ++ sem_args.flags = WINESYNC_SEM_GETONWAIT; ++ ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_NE(0xdeadbeef, sem_args.sem); ++ ++ mutex_args.owner = 0; ++ mutex_args.count = 0; ++ mutex_args.mutex = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_CREATE_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_NE(0xdeadbeef, mutex_args.mutex); ++ ++ objs[0] = sem_args.sem; ++ objs[1] = mutex_args.mutex; ++ ++ wait_args.timeout = (uintptr_t)&timeout; ++ wait_args.objs = (uintptr_t)objs; ++ wait_args.count = 2; ++ wait_args.owner = 123; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ALL, &wait_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ((uintptr_t)objs, wait_args.objs); ++ EXPECT_EQ(2, wait_args.count); ++ EXPECT_EQ(123, wait_args.owner); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, sem_args.count); ++ EXPECT_EQ(3, sem_args.max); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, mutex_args.count); ++ EXPECT_EQ(123, mutex_args.owner); ++ ++ wait_args.owner = 456; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ALL, &wait_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(ETIMEDOUT, errno); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, sem_args.count); ++ EXPECT_EQ(3, sem_args.max); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, mutex_args.count); ++ EXPECT_EQ(123, mutex_args.owner); ++ ++ wait_args.owner = 123; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ALL, &wait_args); ++ EXPECT_EQ(0, ret); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ EXPECT_EQ(3, sem_args.max); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, mutex_args.count); ++ EXPECT_EQ(123, mutex_args.owner); ++ ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ALL, &wait_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(ETIMEDOUT, errno); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, 
sem_args.count); ++ EXPECT_EQ(3, sem_args.max); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, mutex_args.count); ++ EXPECT_EQ(123, mutex_args.owner); ++ ++ sem_args.count = 3; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ owner = 123; ++ ret = ioctl(fd, WINESYNC_IOC_KILL_OWNER, &owner); ++ EXPECT_EQ(0, ret); ++ ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ALL, &wait_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EOWNERDEAD, errno); ++ ++ sem_args.count = 0xdeadbeef; ++ sem_args.max = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, sem_args.count); ++ EXPECT_EQ(3, sem_args.max); ++ ++ mutex_args.count = 0xdeadbeef; ++ mutex_args.owner = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, mutex_args.count); ++ EXPECT_EQ(123, mutex_args.owner); ++ ++ /* test waiting on the same object twice */ ++ objs[0] = objs[1] = sem_args.sem; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ALL, &wait_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ ret = ioctl(fd, WINESYNC_IOC_DELETE, &sem_args.sem); ++ EXPECT_EQ(0, ret); ++ ret = ioctl(fd, WINESYNC_IOC_DELETE, &mutex_args.mutex); ++ EXPECT_EQ(0, ret); ++ ++ close(fd); ++} ++ ++TEST(invalid_objects) ++{ ++ struct winesync_mutex_args mutex_args = {0}; ++ struct winesync_wait_args wait_args = {0}; ++ struct winesync_sem_args sem_args = {0}; ++ __s32 objs[2] = {0}; ++ int fd, ret; ++ ++ fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY); ++ ASSERT_LE(0, fd); ++ ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ ret = ioctl(fd, WINESYNC_IOC_PULSE_SEM, &sem_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ ret = ioctl(fd, WINESYNC_IOC_GET_SEM, &sem_args.sem); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ ret = ioctl(fd, WINESYNC_IOC_PUT_MUTEX, &mutex_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ wait_args.objs = (uintptr_t)objs; ++ wait_args.count = 1; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ALL, &wait_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ ret = ioctl(fd, WINESYNC_IOC_DELETE, &objs[0]); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ sem_args.max = 1; ++ ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ ++ mutex_args.mutex = sem_args.sem; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_MUTEX, &mutex_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ objs[0] = sem_args.sem; ++ objs[1] = sem_args.sem + 1; ++ wait_args.count = 2; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ALL, &wait_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ objs[0] = sem_args.sem + 1; ++ objs[1] = sem_args.sem; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(-1, ret); 
++ EXPECT_EQ(EINVAL, errno); ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ALL, &wait_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ ret = ioctl(fd, WINESYNC_IOC_DELETE, &sem_args.sem); ++ EXPECT_EQ(0, ret); ++ ++ ret = ioctl(fd, WINESYNC_IOC_CREATE_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ ++ sem_args.sem = mutex_args.mutex; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(-1, ret); ++ EXPECT_EQ(EINVAL, errno); ++ ++ ret = ioctl(fd, WINESYNC_IOC_DELETE, &mutex_args.mutex); ++ EXPECT_EQ(0, ret); ++ ++ close(fd); ++} ++ ++struct wake_args ++{ ++ int fd; ++ __s32 obj; ++}; ++ ++struct wait_args ++{ ++ int fd; ++ unsigned long request; ++ struct winesync_wait_args *args; ++ int ret; ++ int err; ++}; ++ ++static void *wait_thread(void *arg) ++{ ++ struct wait_args *args = arg; ++ ++ args->ret = ioctl(args->fd, args->request, args->args); ++ args->err = errno; ++ return NULL; ++} ++ ++static void get_abs_timeout(struct timespec *timeout, clockid_t clock, ++ unsigned int ms) ++{ ++ clock_gettime(clock, timeout); ++ timeout->tv_nsec += ms * 1000000; ++ timeout->tv_sec += (timeout->tv_nsec / 1000000000); ++ timeout->tv_nsec %= 1000000000; ++} ++ ++static int wait_for_thread(pthread_t thread, unsigned int ms) ++{ ++ struct timespec timeout; ++ get_abs_timeout(&timeout, CLOCK_REALTIME, ms); ++ return pthread_timedjoin_np(thread, NULL, &timeout); ++} ++ ++TEST(wake_any) ++{ ++ struct winesync_mutex_args mutex_args = {0}; ++ struct winesync_wait_args wait_args = {0}; ++ struct winesync_sem_args sem_args = {0}; ++ struct wait_args thread_args; ++ struct timespec timeout; ++ pthread_t thread; ++ __s32 objs[2]; ++ __u32 owner; ++ int fd, ret; ++ ++ fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY); ++ ASSERT_LE(0, fd); ++ ++ sem_args.count = 0; ++ sem_args.max = 3; ++ sem_args.sem = 0xdeadbeef; ++ sem_args.flags = WINESYNC_SEM_GETONWAIT; ++ ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_NE(0xdeadbeef, sem_args.sem); ++ ++ mutex_args.owner = 123; ++ mutex_args.count = 1; ++ mutex_args.mutex = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_CREATE_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_NE(0xdeadbeef, mutex_args.mutex); ++ ++ objs[0] = sem_args.sem; ++ objs[1] = mutex_args.mutex; ++ ++ /* test waking the semaphore */ ++ ++ get_abs_timeout(&timeout, CLOCK_MONOTONIC, 1000); ++ wait_args.timeout = (uintptr_t)&timeout; ++ wait_args.objs = (uintptr_t)objs; ++ wait_args.count = 2; ++ wait_args.owner = 456; ++ wait_args.index = 0xdeadbeef; ++ thread_args.fd = fd; ++ thread_args.args = &wait_args; ++ thread_args.request = WINESYNC_IOC_WAIT_ANY; ++ ret = pthread_create(&thread, NULL, wait_thread, &thread_args); ++ EXPECT_EQ(0, ret); ++ ++ ret = wait_for_thread(thread, 100); ++ EXPECT_EQ(ETIMEDOUT, ret); ++ ++ sem_args.count = 1; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ ret = wait_for_thread(thread, 100); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, thread_args.ret); ++ EXPECT_EQ(0, wait_args.index); ++ ++ /* test waking the semaphore via pulse */ ++ ++ get_abs_timeout(&timeout, CLOCK_MONOTONIC, 1000); ++ wait_args.owner = 456; ++ ret = pthread_create(&thread, NULL, wait_thread, &thread_args); ++ EXPECT_EQ(0, ret); ++ ++ ret = wait_for_thread(thread, 100); ++ 
EXPECT_EQ(ETIMEDOUT, ret); ++ ++ sem_args.count = 2; ++ ret = ioctl(fd, WINESYNC_IOC_PULSE_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ ret = wait_for_thread(thread, 100); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, thread_args.ret); ++ EXPECT_EQ(0, wait_args.index); ++ ++ /* test waking the mutex */ ++ ++ /* first grab it again for owner 123 */ ++ wait_args.owner = 123; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, wait_args.index); ++ ++ get_abs_timeout(&timeout, CLOCK_MONOTONIC, 1000); ++ wait_args.owner = 456; ++ ret = pthread_create(&thread, NULL, wait_thread, &thread_args); ++ EXPECT_EQ(0, ret); ++ ++ ret = wait_for_thread(thread, 100); ++ EXPECT_EQ(ETIMEDOUT, ret); ++ ++ mutex_args.owner = 123; ++ mutex_args.count = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(2, mutex_args.count); ++ ++ ret = pthread_tryjoin_np(thread, NULL); ++ EXPECT_EQ(EBUSY, ret); ++ ++ mutex_args.owner = 123; ++ mutex_args.count = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, mutex_args.count); ++ ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, mutex_args.count); ++ EXPECT_EQ(456, mutex_args.owner); ++ ++ ret = wait_for_thread(thread, 100); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, thread_args.ret); ++ EXPECT_EQ(1, wait_args.index); ++ ++ /* delete an object while it's being waited on */ ++ ++ get_abs_timeout(&timeout, CLOCK_MONOTONIC, 200); ++ wait_args.owner = 123; ++ ret = pthread_create(&thread, NULL, wait_thread, &thread_args); ++ EXPECT_EQ(0, ret); ++ ++ ret = wait_for_thread(thread, 100); ++ EXPECT_EQ(ETIMEDOUT, ret); ++ ++ ret = ioctl(fd, WINESYNC_IOC_DELETE, &sem_args.sem); ++ EXPECT_EQ(0, ret); ++ ret = ioctl(fd, WINESYNC_IOC_DELETE, &mutex_args.mutex); ++ EXPECT_EQ(0, ret); ++ ++ ret = wait_for_thread(thread, 200); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(-1, thread_args.ret); ++ EXPECT_EQ(ETIMEDOUT, thread_args.err); ++ ++ close(fd); ++} ++ ++TEST(wake_all) ++{ ++ struct winesync_wait_args wait_args = {0}, wait_args2 = {0}; ++ struct winesync_mutex_args mutex_args = {0}; ++ struct winesync_sem_args sem_args = {0}; ++ struct timespec timeout, timeout2; ++ struct wait_args thread_args; ++ pthread_t thread; ++ __s32 objs[2]; ++ __u32 owner; ++ int fd, ret; ++ ++ fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY); ++ ASSERT_LE(0, fd); ++ ++ sem_args.count = 0; ++ sem_args.max = 3; ++ sem_args.sem = 0xdeadbeef; ++ sem_args.flags = WINESYNC_SEM_GETONWAIT; ++ ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_NE(0xdeadbeef, sem_args.sem); ++ ++ mutex_args.owner = 123; ++ mutex_args.count = 1; ++ mutex_args.mutex = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_CREATE_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_NE(0xdeadbeef, mutex_args.mutex); ++ ++ objs[0] = sem_args.sem; ++ objs[1] = mutex_args.mutex; ++ ++ get_abs_timeout(&timeout, CLOCK_MONOTONIC, 1000); ++ wait_args.timeout = (uintptr_t)&timeout; ++ wait_args.objs = (uintptr_t)objs; ++ wait_args.count = 2; ++ wait_args.owner = 456; ++ thread_args.fd = fd; ++ thread_args.args = &wait_args; ++ thread_args.request = WINESYNC_IOC_WAIT_ALL; ++ ret = pthread_create(&thread, NULL, wait_thread, &thread_args); ++ EXPECT_EQ(0, ret); ++ ++ ret = wait_for_thread(thread, 100); ++ 
EXPECT_EQ(ETIMEDOUT, ret); ++ ++ sem_args.count = 1; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ ret = pthread_tryjoin_np(thread, NULL); ++ EXPECT_EQ(EBUSY, ret); ++ ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, sem_args.count); ++ ++ get_abs_timeout(&timeout2, CLOCK_MONOTONIC, 0); ++ wait_args2.timeout = (uintptr_t)&timeout2; ++ wait_args2.objs = (uintptr_t)&sem_args.sem; ++ wait_args2.count = 1; ++ wait_args2.owner = 123; ++ wait_args2.index = 0xdeadbeef; ++ ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args2); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, wait_args2.index); ++ ++ mutex_args.owner = 123; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, mutex_args.count); ++ ++ ret = pthread_tryjoin_np(thread, NULL); ++ EXPECT_EQ(EBUSY, ret); ++ ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, mutex_args.count); ++ EXPECT_EQ(0, mutex_args.owner); ++ ++ sem_args.count = 1; ++ ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(1, mutex_args.count); ++ EXPECT_EQ(456, mutex_args.owner); ++ ++ ret = wait_for_thread(thread, 100); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, thread_args.ret); ++ ++ /* test waking the semaphore via pulse */ ++ ++ get_abs_timeout(&timeout, CLOCK_MONOTONIC, 1000); ++ wait_args.owner = 456; ++ ret = pthread_create(&thread, NULL, wait_thread, &thread_args); ++ EXPECT_EQ(0, ret); ++ ++ ret = wait_for_thread(thread, 100); ++ EXPECT_EQ(ETIMEDOUT, ret); ++ ++ sem_args.count = 1; ++ ret = ioctl(fd, WINESYNC_IOC_PULSE_SEM, &sem_args); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, sem_args.count); ++ ++ ret = wait_for_thread(thread, 100); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(0, thread_args.ret); ++ ++ /* delete an object while it's being waited on */ ++ ++ get_abs_timeout(&timeout, CLOCK_MONOTONIC, 200); ++ wait_args.owner = 123; ++ ret = pthread_create(&thread, NULL, wait_thread, &thread_args); ++ EXPECT_EQ(0, ret); ++ ++ ret = wait_for_thread(thread, 100); ++ EXPECT_EQ(ETIMEDOUT, ret); ++ ++ ret = ioctl(fd, WINESYNC_IOC_DELETE, &sem_args.sem); ++ EXPECT_EQ(0, ret); ++ ret = ioctl(fd, WINESYNC_IOC_DELETE, &mutex_args.mutex); ++ EXPECT_EQ(0, ret); ++ ++ ret = wait_for_thread(thread, 200); ++ EXPECT_EQ(0, ret); ++ EXPECT_EQ(-1, thread_args.ret); ++ EXPECT_EQ(ETIMEDOUT, thread_args.err); ++ ++ close(fd); ++} ++ ++TEST_HARNESS_MAIN diff --git a/linux-tkg-patches/5.13/0009-glitched-bmq.patch b/linux-tkg-patches/5.13/0009-glitched-bmq.patch new file mode 100644 index 0000000..e42e522 --- /dev/null +++ b/linux-tkg-patches/5.13/0009-glitched-bmq.patch @@ -0,0 +1,90 @@ +From f7f49141a5dbe9c99d78196b58c44307fb2e6be3 Mon Sep 17 00:00:00 2001 +From: Tk-Glitch +Date: Wed, 4 Jul 2018 04:30:08 +0200 +Subject: glitched - BMQ + +diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz +index 2a202a846757..1d9c7ed79b11 100644 +--- a/kernel/Kconfig.hz ++++ b/kernel/Kconfig.hz +@@ -4,7 +4,7 @@ + + choice + prompt "Timer frequency" +- default HZ_250 ++ default HZ_500 + help + Allows the configuration of the timer frequency. 
It is customary + to have the timer interrupt run at 1000 Hz but 100 Hz may be more +@@ -39,6 +39,13 @@ choice + on SMP and NUMA systems and exactly dividing by both PAL and + NTSC frame rates for video and multimedia work. + ++ config HZ_500 ++ bool "500 HZ" ++ help ++ 500 Hz is a balanced timer frequency. Provides fast interactivity ++ on desktops with great smoothness without increasing CPU power ++ consumption and sacrificing the battery life on laptops. ++ + config HZ_1000 + bool "1000 HZ" + help +@@ -52,6 +59,7 @@ config HZ + default 100 if HZ_100 + default 250 if HZ_250 + default 300 if HZ_300 ++ default 500 if HZ_500 + default 1000 if HZ_1000 + + config SCHED_HRTICK + +diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz +index 2a202a846757..1d9c7ed79b11 100644 +--- a/kernel/Kconfig.hz ++++ b/kernel/Kconfig.hz +@@ -4,7 +4,7 @@ + + choice + prompt "Timer frequency" +- default HZ_500 ++ default HZ_750 + help + Allows the configuration of the timer frequency. It is customary + to have the timer interrupt run at 1000 Hz but 100 Hz may be more +@@ -46,6 +46,13 @@ choice + on desktops with great smoothness without increasing CPU power + consumption and sacrificing the battery life on laptops. + ++ config HZ_750 ++ bool "750 HZ" ++ help ++ 750 Hz is a good timer frequency for desktops. Provides fast ++ interactivity with great smoothness without sacrificing too ++ much throughput. ++ + config HZ_1000 + bool "1000 HZ" + help +@@ -60,6 +67,7 @@ config HZ + default 250 if HZ_250 + default 300 if HZ_300 + default 500 if HZ_500 ++ default 750 if HZ_750 + default 1000 if HZ_1000 + + config SCHED_HRTICK + +diff --git a/mm/vmscan.c b/mm/vmscan.c +index 9270a4370d54..30d01e647417 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -169,7 +169,7 @@ + /* + * From 0 .. 200. Higher means more swappy. + */ +-int vm_swappiness = 60; ++int vm_swappiness = 20; + + static void set_task_reclaim_state(struct task_struct *task, + struct reclaim_state *rs) diff --git a/linux-tkg-patches/5.13/0009-glitched-ondemand-bmq.patch b/linux-tkg-patches/5.13/0009-glitched-ondemand-bmq.patch new file mode 100644 index 0000000..a926040 --- /dev/null +++ b/linux-tkg-patches/5.13/0009-glitched-ondemand-bmq.patch @@ -0,0 +1,18 @@ +diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c +index 6b423eebfd5d..61e3271675d6 100644 +--- a/drivers/cpufreq/cpufreq_ondemand.c ++++ b/drivers/cpufreq/cpufreq_ondemand.c +@@ -21,10 +21,10 @@ + #include "cpufreq_ondemand.h" + + /* On-demand governor macros */ +-#define DEF_FREQUENCY_UP_THRESHOLD (80) +-#define DEF_SAMPLING_DOWN_FACTOR (1) ++#define DEF_FREQUENCY_UP_THRESHOLD (55) ++#define DEF_SAMPLING_DOWN_FACTOR (5) + #define MAX_SAMPLING_DOWN_FACTOR (100000) +-#define MICRO_FREQUENCY_UP_THRESHOLD (95) ++#define MICRO_FREQUENCY_UP_THRESHOLD (63) + #define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) + #define MIN_FREQUENCY_UP_THRESHOLD (1) + #define MAX_FREQUENCY_UP_THRESHOLD (100) diff --git a/linux-tkg-patches/5.13/0009-prjc_v5.13-r0.patch b/linux-tkg-patches/5.13/0009-prjc_v5.13-r0.patch new file mode 100644 index 0000000..0f11435 --- /dev/null +++ b/linux-tkg-patches/5.13/0009-prjc_v5.13-r0.patch @@ -0,0 +1,9777 @@ +diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst +index 1d56a6b73a4e..e08ffb857277 100644 +--- a/Documentation/admin-guide/sysctl/kernel.rst ++++ b/Documentation/admin-guide/sysctl/kernel.rst +@@ -1515,3 +1515,13 @@ is 10 seconds. + + The softlockup threshold is (``2 * watchdog_thresh``). 
Setting this + tunable to zero will disable lockup detection altogether. ++ ++yield_type: ++=========== ++ ++BMQ/PDS CPU scheduler only. This determines what type of yield calls ++to sched_yield will perform. ++ ++ 0 - No yield. ++ 1 - Deboost and requeue task. (default) ++ 2 - Set run queue skip task. +diff --git a/Documentation/scheduler/sched-BMQ.txt b/Documentation/scheduler/sched-BMQ.txt +new file mode 100644 +index 000000000000..05c84eec0f31 +--- /dev/null ++++ b/Documentation/scheduler/sched-BMQ.txt +@@ -0,0 +1,110 @@ ++ BitMap queue CPU Scheduler ++ -------------------------- ++ ++CONTENT ++======== ++ ++ Background ++ Design ++ Overview ++ Task policy ++ Priority management ++ BitMap Queue ++ CPU Assignment and Migration ++ ++ ++Background ++========== ++ ++BitMap Queue CPU scheduler, referred to as BMQ from here on, is an evolution ++of previous Priority and Deadline based Skiplist multiple queue scheduler(PDS), ++and inspired by Zircon scheduler. The goal of it is to keep the scheduler code ++simple, while efficiency and scalable for interactive tasks, such as desktop, ++movie playback and gaming etc. ++ ++Design ++====== ++ ++Overview ++-------- ++ ++BMQ use per CPU run queue design, each CPU(logical) has it's own run queue, ++each CPU is responsible for scheduling the tasks that are putting into it's ++run queue. ++ ++The run queue is a set of priority queues. Note that these queues are fifo ++queue for non-rt tasks or priority queue for rt tasks in data structure. See ++BitMap Queue below for details. BMQ is optimized for non-rt tasks in the fact ++that most applications are non-rt tasks. No matter the queue is fifo or ++priority, In each queue is an ordered list of runnable tasks awaiting execution ++and the data structures are the same. When it is time for a new task to run, ++the scheduler simply looks the lowest numbered queueue that contains a task, ++and runs the first task from the head of that queue. And per CPU idle task is ++also in the run queue, so the scheduler can always find a task to run on from ++its run queue. ++ ++Each task will assigned the same timeslice(default 4ms) when it is picked to ++start running. Task will be reinserted at the end of the appropriate priority ++queue when it uses its whole timeslice. When the scheduler selects a new task ++from the priority queue it sets the CPU's preemption timer for the remainder of ++the previous timeslice. When that timer fires the scheduler will stop execution ++on that task, select another task and start over again. ++ ++If a task blocks waiting for a shared resource then it's taken out of its ++priority queue and is placed in a wait queue for the shared resource. When it ++is unblocked it will be reinserted in the appropriate priority queue of an ++eligible CPU. ++ ++Task policy ++----------- ++ ++BMQ supports DEADLINE, FIFO, RR, NORMAL, BATCH and IDLE task policy like the ++mainline CFS scheduler. But BMQ is heavy optimized for non-rt task, that's ++NORMAL/BATCH/IDLE policy tasks. Below is the implementation detail of each ++policy. ++ ++DEADLINE ++ It is squashed as priority 0 FIFO task. ++ ++FIFO/RR ++ All RT tasks share one single priority queue in BMQ run queue designed. The ++complexity of insert operation is O(n). BMQ is not designed for system runs ++with major rt policy tasks. ++ ++NORMAL/BATCH/IDLE ++ BATCH and IDLE tasks are treated as the same policy. They compete CPU with ++NORMAL policy tasks, but they just don't boost. 
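The Overview above reduces pick-next to "the lowest numbered queue that contains a task". A minimal sketch of that O(1) lookup, assuming hypothetical rq->queue.bitmap and rq->queue.heads fields (only SCHED_BITS and the bmq_node list head come from this patch):

static struct task_struct *sketch_pick_next(struct rq *rq)
{
	/* The per-CPU idle task always occupies the last queue, so a set
	 * bit is always found and idx is always valid. */
	int idx = find_first_bit(rq->queue.bitmap, SCHED_BITS);

	return list_first_entry(&rq->queue.heads[idx],
				struct task_struct, bmq_node);
}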
To control the priority of ++NORMAL/BATCH/IDLE tasks, simply use nice level. ++ ++ISO ++ ISO policy is not supported in BMQ. Please use nice level -20 NORMAL policy ++task instead. ++ ++Priority management ++------------------- ++ ++RT tasks have priority from 0-99. For non-rt tasks, there are three different ++factors used to determine the effective priority of a task. The effective ++priority being what is used to determine which queue it will be in. ++ ++The first factor is simply the task’s static priority. Which is assigned from ++task's nice level, within [-20, 19] in userland's point of view and [0, 39] ++internally. ++ ++The second factor is the priority boost. This is a value bounded between ++[-MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ] used to offset the base priority, it is ++modified by the following cases: ++ ++*When a thread has used up its entire timeslice, always deboost its boost by ++increasing by one. ++*When a thread gives up cpu control(voluntary or non-voluntary) to reschedule, ++and its switch-in time(time after last switch and run) below the thredhold ++based on its priority boost, will boost its boost by decreasing by one buti is ++capped at 0 (won’t go negative). ++ ++The intent in this system is to ensure that interactive threads are serviced ++quickly. These are usually the threads that interact directly with the user ++and cause user-perceivable latency. These threads usually do little work and ++spend most of their time blocked awaiting another user event. So they get the ++priority boost from unblocking while background threads that do most of the ++processing receive the priority penalty for using their entire timeslice. +diff --git a/fs/proc/base.c b/fs/proc/base.c +index 3851bfcdba56..732636ac3fd3 100644 +--- a/fs/proc/base.c ++++ b/fs/proc/base.c +@@ -476,7 +476,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns, + seq_puts(m, "0 0 0\n"); + else + seq_printf(m, "%llu %llu %lu\n", +- (unsigned long long)task->se.sum_exec_runtime, ++ (unsigned long long)tsk_seruntime(task), + (unsigned long long)task->sched_info.run_delay, + task->sched_info.pcount); + +diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h +index 8874f681b056..59eb72bf7d5f 100644 +--- a/include/asm-generic/resource.h ++++ b/include/asm-generic/resource.h +@@ -23,7 +23,7 @@ + [RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \ + [RLIMIT_SIGPENDING] = { 0, 0 }, \ + [RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \ +- [RLIMIT_NICE] = { 0, 0 }, \ ++ [RLIMIT_NICE] = { 30, 30 }, \ + [RLIMIT_RTPRIO] = { 0, 0 }, \ + [RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \ + } +diff --git a/include/linux/sched.h b/include/linux/sched.h +index ef00bb22164c..2290806d8af2 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -35,6 +35,7 @@ + #include + #include + #include ++#include + #include + + /* task_struct member predeclarations (sorted alphabetically): */ +@@ -670,12 +671,18 @@ struct task_struct { + unsigned int ptrace; + + #ifdef CONFIG_SMP +- int on_cpu; + struct __call_single_node wake_entry; ++#endif ++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT) ++ int on_cpu; ++#endif ++ ++#ifdef CONFIG_SMP + #ifdef CONFIG_THREAD_INFO_IN_TASK + /* Current CPU: */ + unsigned int cpu; + #endif ++#ifndef CONFIG_SCHED_ALT + unsigned int wakee_flips; + unsigned long wakee_flip_decay_ts; + struct task_struct *last_wakee; +@@ -689,6 +696,7 @@ struct task_struct { + */ + int recent_used_cpu; + int wake_cpu; ++#endif /* !CONFIG_SCHED_ALT */ + 
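A quick worked example of the mapping described in the priority management section above: nice -20, 0 and +19 correspond to internal static priorities 0, 20 and 39 (nice + 20). With CONFIG_SCHED_BMQ the boost factor can then shift the effective priority by at most MAX_PRIORITY_ADJ steps in either direction around that base (defined later in this patch as 7 for BMQ and 0 for PDS, which relies on deadline ordering instead).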
#endif + int on_rq; + +@@ -697,13 +705,33 @@ struct task_struct { + int normal_prio; + unsigned int rt_priority; + ++#ifdef CONFIG_SCHED_ALT ++ u64 last_ran; ++ s64 time_slice; ++#ifdef CONFIG_SCHED_BMQ ++ int boost_prio; ++ int bmq_idx; ++ struct list_head bmq_node; ++#endif /* CONFIG_SCHED_BMQ */ ++#ifdef CONFIG_SCHED_PDS ++ u64 deadline; ++ u64 priodl; ++ /* skip list level */ ++ int sl_level; ++ /* skip list node */ ++ struct skiplist_node sl_node; ++#endif /* CONFIG_SCHED_PDS */ ++ /* sched_clock time spent running */ ++ u64 sched_time; ++#else /* !CONFIG_SCHED_ALT */ + const struct sched_class *sched_class; + struct sched_entity se; + struct sched_rt_entity rt; ++ struct sched_dl_entity dl; ++#endif + #ifdef CONFIG_CGROUP_SCHED + struct task_group *sched_task_group; + #endif +- struct sched_dl_entity dl; + + #ifdef CONFIG_UCLAMP_TASK + /* +@@ -1388,6 +1416,15 @@ struct task_struct { + */ + }; + ++#ifdef CONFIG_SCHED_ALT ++#define tsk_seruntime(t) ((t)->sched_time) ++/* replace the uncertian rt_timeout with 0UL */ ++#define tsk_rttimeout(t) (0UL) ++#else /* CFS */ ++#define tsk_seruntime(t) ((t)->se.sum_exec_runtime) ++#define tsk_rttimeout(t) ((t)->rt.timeout) ++#endif /* !CONFIG_SCHED_ALT */ ++ + static inline struct pid *task_pid(struct task_struct *task) + { + return task->thread_pid; +diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h +index 1aff00b65f3c..179d77c8360e 100644 +--- a/include/linux/sched/deadline.h ++++ b/include/linux/sched/deadline.h +@@ -1,5 +1,24 @@ + /* SPDX-License-Identifier: GPL-2.0 */ + ++#ifdef CONFIG_SCHED_ALT ++ ++static inline int dl_task(struct task_struct *p) ++{ ++ return 0; ++} ++ ++#ifdef CONFIG_SCHED_BMQ ++#define __tsk_deadline(p) (0UL) ++#endif ++ ++#ifdef CONFIG_SCHED_PDS ++#define __tsk_deadline(p) ((p)->priodl) ++#endif ++ ++#else ++ ++#define __tsk_deadline(p) ((p)->dl.deadline) ++ + /* + * SCHED_DEADLINE tasks has negative priorities, reflecting + * the fact that any of them has higher prio than RT and +@@ -19,6 +38,7 @@ static inline int dl_task(struct task_struct *p) + { + return dl_prio(p->prio); + } ++#endif /* CONFIG_SCHED_ALT */ + + static inline bool dl_time_before(u64 a, u64 b) + { +diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h +index ab83d85e1183..4d4f92bffeea 100644 +--- a/include/linux/sched/prio.h ++++ b/include/linux/sched/prio.h +@@ -18,6 +18,14 @@ + #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH) + #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2) + ++/* +/- priority levels from the base priority */ ++#ifdef CONFIG_SCHED_BMQ ++#define MAX_PRIORITY_ADJ 7 ++#endif ++#ifdef CONFIG_SCHED_PDS ++#define MAX_PRIORITY_ADJ 0 ++#endif ++ + /* + * Convert user-nice values [ -20 ... 0 ... 19 ] + * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], +diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h +index e5af028c08b4..0a7565d0d3cf 100644 +--- a/include/linux/sched/rt.h ++++ b/include/linux/sched/rt.h +@@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk) + + if (policy == SCHED_FIFO || policy == SCHED_RR) + return true; ++#ifndef CONFIG_SCHED_ALT + if (policy == SCHED_DEADLINE) + return true; ++#endif + return false; + } + +diff --git a/include/linux/skip_list.h b/include/linux/skip_list.h +new file mode 100644 +index 000000000000..637c83ecbd6b +--- /dev/null ++++ b/include/linux/skip_list.h +@@ -0,0 +1,175 @@ ++/* ++ * Copyright (C) 2016 Alfred Chen. 
++ * ++ * Code based on Con Kolivas's skip list implementation for BFS, and ++ * which is based on example originally by William Pugh. ++ * ++ * Skip Lists are a probabilistic alternative to balanced trees, as ++ * described in the June 1990 issue of CACM and were invented by ++ * William Pugh in 1987. ++ * ++ * A couple of comments about this implementation: ++ * ++ * This file only provides a infrastructure of skip list. ++ * ++ * skiplist_node is embedded into container data structure, to get rid ++ * the dependency of kmalloc/kfree operation in scheduler code. ++ * ++ * A customized search function should be defined using DEFINE_SKIPLIST_INSERT ++ * macro and be used for skip list insert operation. ++ * ++ * Random Level is also not defined in this file, instead, it should be ++ * customized implemented and set to node->level then pass to the customized ++ * skiplist_insert function. ++ * ++ * Levels start at zero and go up to (NUM_SKIPLIST_LEVEL -1) ++ * ++ * NUM_SKIPLIST_LEVEL in this implementation is 8 instead of origin 16, ++ * considering that there will be 256 entries to enable the top level when using ++ * random level p=0.5, and that number is more than enough for a run queue usage ++ * in a scheduler usage. And it also help to reduce the memory usage of the ++ * embedded skip list node in task_struct to about 50%. ++ * ++ * The insertion routine has been implemented so as to use the ++ * dirty hack described in the CACM paper: if a random level is ++ * generated that is more than the current maximum level, the ++ * current maximum level plus one is used instead. ++ * ++ * BFS Notes: In this implementation of skiplists, there are bidirectional ++ * next/prev pointers and the insert function returns a pointer to the actual ++ * node the value is stored. The key here is chosen by the scheduler so as to ++ * sort tasks according to the priority list requirements and is no longer used ++ * by the scheduler after insertion. The scheduler lookup, however, occurs in ++ * O(1) time because it is always the first item in the level 0 linked list. ++ * Since the task struct stores a copy of the node pointer upon skiplist_insert, ++ * it can also remove it much faster than the original implementation with the ++ * aid of prev<->next pointer manipulation and no searching. ++ */ ++#ifndef _LINUX_SKIP_LIST_H ++#define _LINUX_SKIP_LIST_H ++ ++#include ++ ++#define NUM_SKIPLIST_LEVEL (4) ++ ++struct skiplist_node { ++ int level; /* Levels in this node */ ++ struct skiplist_node *next[NUM_SKIPLIST_LEVEL]; ++ struct skiplist_node *prev[NUM_SKIPLIST_LEVEL]; ++}; ++ ++#define SKIPLIST_NODE_INIT(name) { 0,\ ++ {&name, &name, &name, &name},\ ++ {&name, &name, &name, &name},\ ++ } ++ ++/** ++ * INIT_SKIPLIST_NODE -- init a skiplist_node, expecially for header ++ * @node: the skip list node to be inited. ++ */ ++static inline void INIT_SKIPLIST_NODE(struct skiplist_node *node) ++{ ++ int i; ++ ++ node->level = 0; ++ for (i = 0; i < NUM_SKIPLIST_LEVEL; i++) { ++ WRITE_ONCE(node->next[i], node); ++ node->prev[i] = node; ++ } ++} ++ ++/** ++ * skiplist_entry - get the struct for this entry ++ * @ptr: the &struct skiplist_node pointer. ++ * @type: the type of the struct this is embedded in. ++ * @member: the name of the skiplist_node within the struct. 
++ */ ++#define skiplist_entry(ptr, type, member) \ ++ container_of(ptr, type, member) ++ ++/** ++ * DEFINE_SKIPLIST_INSERT_FUNC -- macro to define a customized skip list insert ++ * function, which takes two parameters, first one is the header node of the ++ * skip list, second one is the skip list node to be inserted ++ * @func_name: the customized skip list insert function name ++ * @search_func: the search function to be used, which takes two parameters, ++ * 1st one is the itrator of skiplist_node in the list, the 2nd is the skip list ++ * node to be inserted, the function should return true if search should be ++ * continued, otherwise return false. ++ * Returns 1 if @node is inserted as the first item of skip list at level zero, ++ * otherwise 0 ++ */ ++#define DEFINE_SKIPLIST_INSERT_FUNC(func_name, search_func)\ ++static inline int func_name(struct skiplist_node *head, struct skiplist_node *node)\ ++{\ ++ struct skiplist_node *p, *q;\ ++ unsigned int k = head->level;\ ++ unsigned int l = node->level;\ ++\ ++ p = head;\ ++ if (l > k) {\ ++ l = node->level = ++head->level;\ ++\ ++ node->next[l] = head;\ ++ node->prev[l] = head;\ ++ head->next[l] = node;\ ++ head->prev[l] = node;\ ++\ ++ do {\ ++ while (q = p->next[k], q != head && search_func(q, node))\ ++ p = q;\ ++\ ++ node->prev[k] = p;\ ++ node->next[k] = q;\ ++ q->prev[k] = node;\ ++ p->next[k] = node;\ ++ } while (k--);\ ++\ ++ return (p == head);\ ++ }\ ++\ ++ while (k > l) {\ ++ while (q = p->next[k], q != head && search_func(q, node))\ ++ p = q;\ ++ k--;\ ++ }\ ++\ ++ do {\ ++ while (q = p->next[k], q != head && search_func(q, node))\ ++ p = q;\ ++\ ++ node->prev[k] = p;\ ++ node->next[k] = q;\ ++ q->prev[k] = node;\ ++ p->next[k] = node;\ ++ } while (k--);\ ++\ ++ return (p == head);\ ++} ++ ++/** ++ * skiplist_del_init -- delete skip list node from a skip list and reset it's ++ * init state ++ * @head: the header node of the skip list to be deleted from. ++ * @node: the skip list node to be deleted, the caller need to ensure @node is ++ * in skip list which @head represent. ++ * Returns 1 if @node is the first item of skip level at level zero, otherwise 0 ++ */ ++static inline int ++skiplist_del_init(struct skiplist_node *head, struct skiplist_node *node) ++{ ++ unsigned int i, level = node->level; ++ ++ for (i = 0; i <= level; i++) { ++ node->prev[i]->next[i] = node->next[i]; ++ node->next[i]->prev[i] = node->prev[i]; ++ } ++ if (level == head->level && level) { ++ while (head->next[level] == head && level) ++ level--; ++ head->level = level; ++ } ++ ++ return (node->prev[0] == head); ++} ++#endif /* _LINUX_SKIP_LIST_H */ +diff --git a/init/Kconfig b/init/Kconfig +index 5f5c776ef192..2529408ce0b5 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -779,9 +779,39 @@ config GENERIC_SCHED_CLOCK + + menu "Scheduler features" + ++menuconfig SCHED_ALT ++ bool "Alternative CPU Schedulers" ++ default y ++ help ++ This feature enable alternative CPU scheduler" ++ ++if SCHED_ALT ++ ++choice ++ prompt "Alternative CPU Scheduler" ++ default SCHED_BMQ ++ ++config SCHED_BMQ ++ bool "BMQ CPU scheduler" ++ help ++ The BitMap Queue CPU scheduler for excellent interactivity and ++ responsiveness on the desktop and solid scalability on normal ++ hardware and commodity servers. ++ ++config SCHED_PDS ++ bool "PDS CPU scheduler" ++ help ++ The Priority and Deadline based Skip list multiple queue CPU ++ Scheduler. 
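To make the DEFINE_SKIPLIST_INSERT_FUNC contract above concrete, here is a short usage sketch; the demo_item container and its key field are illustrative placeholders, not part of this patch. The comparator returns true while the walk should continue, and the generated insert function reports whether the node became the new first entry at level zero:

struct demo_item {
	u64 key;			/* ordering key, e.g. a deadline */
	struct skiplist_node sl_node;	/* embedded node, no kmalloc needed */
};

/* Keep searching while the iterator's key does not exceed the new node's. */
static inline bool demo_search(struct skiplist_node *it,
			       struct skiplist_node *node)
{
	return skiplist_entry(it, struct demo_item, sl_node)->key <=
	       skiplist_entry(node, struct demo_item, sl_node)->key;
}

DEFINE_SKIPLIST_INSERT_FUNC(demo_insert, demo_search)

/* The caller chooses node->level (0 .. NUM_SKIPLIST_LEVEL-1) before inserting. */
static int demo_add(struct skiplist_node *head, struct demo_item *item)
{
	item->sl_node.level = 0;	/* a real user would pick a random level */
	return demo_insert(head, &item->sl_node);	/* 1: new level-0 head */
}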
++ ++endchoice ++ ++endif ++ + config UCLAMP_TASK + bool "Enable utilization clamping for RT/FAIR tasks" + depends on CPU_FREQ_GOV_SCHEDUTIL ++ depends on !SCHED_ALT + help + This feature enables the scheduler to track the clamped utilization + of each CPU based on RUNNABLE tasks scheduled on that CPU. +@@ -867,6 +897,7 @@ config NUMA_BALANCING + depends on ARCH_SUPPORTS_NUMA_BALANCING + depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY + depends on SMP && NUMA && MIGRATION ++ depends on !SCHED_ALT + help + This option adds support for automatic NUMA aware memory/task placement. + The mechanism is quite primitive and is based on migrating memory when +@@ -959,6 +990,7 @@ config FAIR_GROUP_SCHED + depends on CGROUP_SCHED + default CGROUP_SCHED + ++if !SCHED_ALT + config CFS_BANDWIDTH + bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED" + depends on FAIR_GROUP_SCHED +@@ -981,6 +1013,7 @@ config RT_GROUP_SCHED + realtime bandwidth for them. + See Documentation/scheduler/sched-rt-group.rst for more information. + ++endif #!SCHED_ALT + endif #CGROUP_SCHED + + config UCLAMP_TASK_GROUP +@@ -1210,6 +1243,7 @@ config CHECKPOINT_RESTORE + + config SCHED_AUTOGROUP + bool "Automatic process group scheduling" ++ depends on !SCHED_ALT + select CGROUPS + select CGROUP_SCHED + select FAIR_GROUP_SCHED +diff --git a/init/init_task.c b/init/init_task.c +index 3711cdaafed2..47f57d2cc488 100644 +--- a/init/init_task.c ++++ b/init/init_task.c +@@ -75,9 +75,20 @@ struct task_struct init_task + .stack = init_stack, + .usage = REFCOUNT_INIT(2), + .flags = PF_KTHREAD, ++#ifdef CONFIG_SCHED_BMQ ++ .prio = DEFAULT_PRIO + MAX_PRIORITY_ADJ, ++ .static_prio = DEFAULT_PRIO, ++ .normal_prio = DEFAULT_PRIO + MAX_PRIORITY_ADJ, ++#endif ++#ifdef CONFIG_SCHED_PDS ++ .prio = MAX_RT_PRIO, ++ .static_prio = DEFAULT_PRIO, ++ .normal_prio = MAX_RT_PRIO, ++#else + .prio = MAX_PRIO - 20, + .static_prio = MAX_PRIO - 20, + .normal_prio = MAX_PRIO - 20, ++#endif + .policy = SCHED_NORMAL, + .cpus_ptr = &init_task.cpus_mask, + .cpus_mask = CPU_MASK_ALL, +@@ -87,6 +98,19 @@ struct task_struct init_task + .restart_block = { + .fn = do_no_restart_syscall, + }, ++#ifdef CONFIG_SCHED_ALT ++#ifdef CONFIG_SCHED_BMQ ++ .boost_prio = 0, ++ .bmq_idx = 15, ++ .bmq_node = LIST_HEAD_INIT(init_task.bmq_node), ++#endif ++#ifdef CONFIG_SCHED_PDS ++ .deadline = 0, ++ .sl_level = 0, ++ .sl_node = SKIPLIST_NODE_INIT(init_task.sl_node), ++#endif ++ .time_slice = HZ, ++#else + .se = { + .group_node = LIST_HEAD_INIT(init_task.se.group_node), + }, +@@ -94,6 +118,7 @@ struct task_struct init_task + .run_list = LIST_HEAD_INIT(init_task.rt.run_list), + .time_slice = RR_TIMESLICE, + }, ++#endif + .tasks = LIST_HEAD_INIT(init_task.tasks), + #ifdef CONFIG_SMP + .pushable_tasks = PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO), +diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c +index 5258b68153e0..3eb670b1bb76 100644 +--- a/kernel/cgroup/cpuset.c ++++ b/kernel/cgroup/cpuset.c +@@ -636,7 +636,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial) + return ret; + } + +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT) + /* + * Helper routine for generate_sched_domains(). + * Do cpusets a, b have overlapping effective cpus_allowed masks? 
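Picking up the init_task.c hunk above, the numbers work out as follows: with CONFIG_SCHED_BMQ the boot task starts at prio = normal_prio = DEFAULT_PRIO + MAX_PRIORITY_ADJ = 120 + 7 = 127 with static_prio 120; with CONFIG_SCHED_PDS it starts at MAX_RT_PRIO (100) with the same static_prio; without SCHED_ALT the values stay at MAX_PRIO - 20 = 120.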
+@@ -1032,7 +1032,7 @@ static void rebuild_sched_domains_locked(void) + /* Have scheduler rebuild the domains */ + partition_and_rebuild_sched_domains(ndoms, doms, attr); + } +-#else /* !CONFIG_SMP */ ++#else /* !CONFIG_SMP || CONFIG_SCHED_ALT */ + static void rebuild_sched_domains_locked(void) + { + } +diff --git a/kernel/delayacct.c b/kernel/delayacct.c +index 27725754ac99..769d773c7182 100644 +--- a/kernel/delayacct.c ++++ b/kernel/delayacct.c +@@ -106,7 +106,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) + */ + t1 = tsk->sched_info.pcount; + t2 = tsk->sched_info.run_delay; +- t3 = tsk->se.sum_exec_runtime; ++ t3 = tsk_seruntime(tsk); + + d->cpu_count += t1; + +diff --git a/kernel/exit.c b/kernel/exit.c +index 04029e35e69a..5ee0dc0b9175 100644 +--- a/kernel/exit.c ++++ b/kernel/exit.c +@@ -122,7 +122,7 @@ static void __exit_signal(struct task_struct *tsk) + sig->curr_target = next_thread(tsk); + } + +- add_device_randomness((const void*) &tsk->se.sum_exec_runtime, ++ add_device_randomness((const void*) &tsk_seruntime(tsk), + sizeof(unsigned long long)); + + /* +@@ -143,7 +143,7 @@ static void __exit_signal(struct task_struct *tsk) + sig->inblock += task_io_get_inblock(tsk); + sig->oublock += task_io_get_oublock(tsk); + task_io_accounting_add(&sig->ioac, &tsk->ioac); +- sig->sum_sched_runtime += tsk->se.sum_exec_runtime; ++ sig->sum_sched_runtime += tsk_seruntime(tsk); + sig->nr_threads--; + __unhash_process(tsk, group_dead); + write_sequnlock(&sig->stats_lock); +diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c +index f6310f848f34..4176ad070bc9 100644 +--- a/kernel/livepatch/transition.c ++++ b/kernel/livepatch/transition.c +@@ -306,7 +306,11 @@ static bool klp_try_switch_task(struct task_struct *task) + */ + rq = task_rq_lock(task, &flags); + ++#ifdef CONFIG_SCHED_ALT ++ if (task_running(task) && task != current) { ++#else + if (task_running(rq, task) && task != current) { ++#endif + snprintf(err_buf, STACK_ERR_BUF_SIZE, + "%s: %s:%d is running\n", __func__, task->comm, + task->pid); +diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c +index 48fff6437901..40506d5b5a2e 100644 +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -227,14 +227,18 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, + * Only use with rt_mutex_waiter_{less,equal}() + */ + #define task_to_waiter(p) \ +- &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline } ++ &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = __tsk_deadline(p) } + + static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left, + struct rt_mutex_waiter *right) + { ++#ifdef CONFIG_SCHED_PDS ++ return (left->deadline < right->deadline); ++#else + if (left->prio < right->prio) + return 1; + ++#ifndef CONFIG_SCHED_BMQ + /* + * If both waiters have dl_prio(), we check the deadlines of the + * associated tasks. +@@ -244,16 +248,22 @@ rt_mutex_waiter_less(struct rt_mutex_waiter *left, + */ + if (dl_prio(left->prio)) + return dl_time_before(left->deadline, right->deadline); ++#endif + + return 0; ++#endif + } + + static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left, + struct rt_mutex_waiter *right) + { ++#ifdef CONFIG_SCHED_PDS ++ return (left->deadline == right->deadline); ++#else + if (left->prio != right->prio) + return 0; + ++#ifndef CONFIG_SCHED_BMQ + /* + * If both waiters have dl_prio(), we check the deadlines of the + * associated tasks. 
+@@ -263,8 +273,10 @@ rt_mutex_waiter_equal(struct rt_mutex_waiter *left, + */ + if (dl_prio(left->prio)) + return left->deadline == right->deadline; ++#endif + + return 1; ++#endif + } + + #define __node_2_waiter(node) \ +@@ -660,7 +672,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, + * the values of the node being removed. + */ + waiter->prio = task->prio; +- waiter->deadline = task->dl.deadline; ++ waiter->deadline = __tsk_deadline(task); + + rt_mutex_enqueue(lock, waiter); + +@@ -933,7 +945,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, + waiter->task = task; + waiter->lock = lock; + waiter->prio = task->prio; +- waiter->deadline = task->dl.deadline; ++ waiter->deadline = __tsk_deadline(task); + + /* Get the top priority waiter on the lock */ + if (rt_mutex_has_waiters(lock)) +diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile +index 5fc9c9b70862..eb6d7d87779f 100644 +--- a/kernel/sched/Makefile ++++ b/kernel/sched/Makefile +@@ -22,14 +22,20 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) + CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer + endif + +-obj-y += core.o loadavg.o clock.o cputime.o +-obj-y += idle.o fair.o rt.o deadline.o +-obj-y += wait.o wait_bit.o swait.o completion.o +- +-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o ++ifdef CONFIG_SCHED_ALT ++obj-y += alt_core.o alt_debug.o ++else ++obj-y += core.o ++obj-y += fair.o rt.o deadline.o ++obj-$(CONFIG_SMP) += cpudeadline.o stop_task.o + obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o +-obj-$(CONFIG_SCHEDSTATS) += stats.o + obj-$(CONFIG_SCHED_DEBUG) += debug.o ++endif ++obj-y += loadavg.o clock.o cputime.o ++obj-y += idle.o ++obj-y += wait.o wait_bit.o swait.o completion.o ++obj-$(CONFIG_SMP) += cpupri.o pelt.o topology.o ++obj-$(CONFIG_SCHEDSTATS) += stats.o + obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o + obj-$(CONFIG_CPU_FREQ) += cpufreq.o + obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o +diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c +new file mode 100644 +index 000000000000..c85e3ccf9302 +--- /dev/null ++++ b/kernel/sched/alt_core.c +@@ -0,0 +1,7137 @@ ++/* ++ * kernel/sched/alt_core.c ++ * ++ * Core alternative kernel scheduler code and related syscalls ++ * ++ * Copyright (C) 1991-2002 Linus Torvalds ++ * ++ * 2009-08-13 Brainfuck deadline scheduling policy by Con Kolivas deletes ++ * a whole lot of those previous things. ++ * 2017-09-06 Priority and Deadline based Skip list multiple queue kernel ++ * scheduler by Alfred Chen. ++ * 2019-02-20 BMQ(BitMap Queue) kernel scheduler by Alfred Chen. ++ */ ++#define CREATE_TRACE_POINTS ++#include ++#undef CREATE_TRACE_POINTS ++ ++#include "sched.h" ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include ++ ++#include "../workqueue_internal.h" ++#include "../../fs/io-wq.h" ++#include "../smpboot.h" ++ ++#include "pelt.h" ++#include "smp.h" ++ ++/* ++ * Export tracepoints that act as a bare tracehook (ie: have no trace event ++ * associated with them) to allow external modules to probe them. 
++ */ ++EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp); ++ ++#define ALT_SCHED_VERSION "v5.13-r0-Glitched" ++ ++/* rt_prio(prio) defined in include/linux/sched/rt.h */ ++#define rt_task(p) rt_prio((p)->prio) ++#define rt_policy(policy) ((policy) == SCHED_FIFO || (policy) == SCHED_RR) ++#define task_has_rt_policy(p) (rt_policy((p)->policy)) ++ ++#define STOP_PRIO (MAX_RT_PRIO - 1) ++ ++/* Default time slice is 4 in ms, can be set via kernel parameter "sched_timeslice" */ ++u64 sched_timeslice_ns __read_mostly = (4 * 1000 * 1000); ++ ++static int __init sched_timeslice(char *str) ++{ ++ int timeslice_us; ++ ++ get_option(&str, ×lice_us); ++ if (timeslice_us >= 1000) ++ sched_timeslice_ns = timeslice_us * 1000; ++ ++ return 0; ++} ++early_param("sched_timeslice", sched_timeslice); ++ ++/* Reschedule if less than this many μs left */ ++#define RESCHED_NS (100 * 1000) ++ ++/** ++ * sched_yield_type - Choose what sort of yield sched_yield will perform. ++ * 0: No yield. ++ * 1: Deboost and requeue task. (default) ++ * 2: Set rq skip task. ++ */ ++int sched_yield_type __read_mostly = 1; ++ ++#ifdef CONFIG_SMP ++static cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp; ++ ++DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_affinity_masks); ++DEFINE_PER_CPU(cpumask_t *, sched_cpu_affinity_end_mask); ++ ++DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks); ++DEFINE_PER_CPU(cpumask_t *, sched_cpu_llc_mask); ++ ++#ifdef CONFIG_SCHED_SMT ++DEFINE_STATIC_KEY_FALSE(sched_smt_present); ++EXPORT_SYMBOL_GPL(sched_smt_present); ++#endif ++ ++/* ++ * Keep a unique ID per domain (we use the first CPUs number in the cpumask of ++ * the domain), this allows us to quickly tell if two cpus are in the same cache ++ * domain, see cpus_share_cache(). 
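For the sched_timeslice= early parameter above, the value is taken in microseconds and anything below 1000 is silently ignored: booting with sched_timeslice=2000 yields a 2 ms slice (2000 * 1000 = 2,000,000 ns), while sched_timeslice=500 leaves the 4 ms default untouched. RESCHED_NS (100 * 1000 ns, i.e. 100 us) is then the cut-off below which a task's remaining slice triggers a reschedule rather than another dispatch.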
++ */ ++DEFINE_PER_CPU(int, sd_llc_id); ++#endif /* CONFIG_SMP */ ++ ++static DEFINE_MUTEX(sched_hotcpu_mutex); ++ ++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); ++ ++#ifndef prepare_arch_switch ++# define prepare_arch_switch(next) do { } while (0) ++#endif ++#ifndef finish_arch_post_lock_switch ++# define finish_arch_post_lock_switch() do { } while (0) ++#endif ++ ++#define IDLE_WM (IDLE_TASK_SCHED_PRIO) ++ ++#ifdef CONFIG_SCHED_SMT ++static cpumask_t sched_sg_idle_mask ____cacheline_aligned_in_smp; ++#endif ++static cpumask_t sched_rq_watermark[SCHED_BITS] ____cacheline_aligned_in_smp; ++ ++#ifdef CONFIG_SCHED_BMQ ++#include "bmq_imp.h" ++#endif ++#ifdef CONFIG_SCHED_PDS ++#include "pds_imp.h" ++#endif ++ ++static inline void update_sched_rq_watermark(struct rq *rq) ++{ ++ unsigned long watermark = sched_queue_watermark(rq); ++ unsigned long last_wm = rq->watermark; ++ unsigned long i; ++ int cpu; ++ ++ /*printk(KERN_INFO "sched: watermark(%d) %d, last %d\n", ++ cpu_of(rq), watermark, last_wm);*/ ++ if (watermark == last_wm) ++ return; ++ ++ rq->watermark = watermark; ++ cpu = cpu_of(rq); ++ if (watermark < last_wm) { ++ for (i = watermark + 1; i <= last_wm; i++) ++ cpumask_andnot(&sched_rq_watermark[i], ++ &sched_rq_watermark[i], cpumask_of(cpu)); ++#ifdef CONFIG_SCHED_SMT ++ if (!static_branch_likely(&sched_smt_present)) ++ return; ++ if (IDLE_WM == last_wm) ++ cpumask_andnot(&sched_sg_idle_mask, ++ &sched_sg_idle_mask, cpu_smt_mask(cpu)); ++#endif ++ return; ++ } ++ /* last_wm < watermark */ ++ for (i = last_wm + 1; i <= watermark; i++) ++ cpumask_set_cpu(cpu, &sched_rq_watermark[i]); ++#ifdef CONFIG_SCHED_SMT ++ if (!static_branch_likely(&sched_smt_present)) ++ return; ++ if (IDLE_WM == watermark) { ++ cpumask_t tmp; ++ cpumask_and(&tmp, cpu_smt_mask(cpu), &sched_rq_watermark[IDLE_WM]); ++ if (cpumask_equal(&tmp, cpu_smt_mask(cpu))) ++ cpumask_or(&sched_sg_idle_mask, cpu_smt_mask(cpu), ++ &sched_sg_idle_mask); ++ } ++#endif ++} ++ ++static inline struct task_struct *rq_runnable_task(struct rq *rq) ++{ ++ struct task_struct *next = sched_rq_first_task(rq); ++ ++ if (unlikely(next == rq->skip)) ++ next = sched_rq_next_task(next, rq); ++ ++ return next; ++} ++ ++/* ++ * Serialization rules: ++ * ++ * Lock order: ++ * ++ * p->pi_lock ++ * rq->lock ++ * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls) ++ * ++ * rq1->lock ++ * rq2->lock where: rq1 < rq2 ++ * ++ * Regular state: ++ * ++ * Normal scheduling state is serialized by rq->lock. __schedule() takes the ++ * local CPU's rq->lock, it optionally removes the task from the runqueue and ++ * always looks at the local rq data structures to find the most eligible task ++ * to run next. ++ * ++ * Task enqueue is also under rq->lock, possibly taken from another CPU. ++ * Wakeups from another LLC domain might use an IPI to transfer the enqueue to ++ * the local CPU to avoid bouncing the runqueue state around [ see ++ * ttwu_queue_wakelist() ] ++ * ++ * Task wakeup, specifically wakeups that involve migration, are horribly ++ * complicated to avoid having to take two rq->locks. ++ * ++ * Special state: ++ * ++ * System-calls and anything external will use task_rq_lock() which acquires ++ * both p->pi_lock and rq->lock. 
As a consequence the state they change is ++ * stable while holding either lock: ++ * ++ * - sched_setaffinity()/ ++ * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed ++ * - set_user_nice(): p->se.load, p->*prio ++ * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio, ++ * p->se.load, p->rt_priority, ++ * p->dl.dl_{runtime, deadline, period, flags, bw, density} ++ * - sched_setnuma(): p->numa_preferred_nid ++ * - sched_move_task()/ ++ * cpu_cgroup_fork(): p->sched_task_group ++ * - uclamp_update_active() p->uclamp* ++ * ++ * p->state <- TASK_*: ++ * ++ * is changed locklessly using set_current_state(), __set_current_state() or ++ * set_special_state(), see their respective comments, or by ++ * try_to_wake_up(). This latter uses p->pi_lock to serialize against ++ * concurrent self. ++ * ++ * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }: ++ * ++ * is set by activate_task() and cleared by deactivate_task(), under ++ * rq->lock. Non-zero indicates the task is runnable, the special ++ * ON_RQ_MIGRATING state is used for migration without holding both ++ * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock(). ++ * ++ * p->on_cpu <- { 0, 1 }: ++ * ++ * is set by prepare_task() and cleared by finish_task() such that it will be ++ * set before p is scheduled-in and cleared after p is scheduled-out, both ++ * under rq->lock. Non-zero indicates the task is running on its CPU. ++ * ++ * [ The astute reader will observe that it is possible for two tasks on one ++ * CPU to have ->on_cpu = 1 at the same time. ] ++ * ++ * task_cpu(p): is changed by set_task_cpu(), the rules are: ++ * ++ * - Don't call set_task_cpu() on a blocked task: ++ * ++ * We don't care what CPU we're not running on, this simplifies hotplug, ++ * the CPU assignment of blocked tasks isn't required to be valid. ++ * ++ * - for try_to_wake_up(), called under p->pi_lock: ++ * ++ * This allows try_to_wake_up() to only take one rq->lock, see its comment. 
++ * ++ * - for migration called under rq->lock: ++ * [ see task_on_rq_migrating() in task_rq_lock() ] ++ * ++ * o move_queued_task() ++ * o detach_task() ++ * ++ * - for migration called under double_rq_lock(): ++ * ++ * o __migrate_swap_task() ++ * o push_rt_task() / pull_rt_task() ++ * o push_dl_task() / pull_dl_task() ++ * o dl_task_offline_migration() ++ * ++ */ ++ ++/* ++ * Context: p->pi_lock ++ */ ++static inline struct rq ++*__task_access_lock(struct task_struct *p, raw_spinlock_t **plock) ++{ ++ struct rq *rq; ++ for (;;) { ++ rq = task_rq(p); ++ if (p->on_cpu || task_on_rq_queued(p)) { ++ raw_spin_lock(&rq->lock); ++ if (likely((p->on_cpu || task_on_rq_queued(p)) ++ && rq == task_rq(p))) { ++ *plock = &rq->lock; ++ return rq; ++ } ++ raw_spin_unlock(&rq->lock); ++ } else if (task_on_rq_migrating(p)) { ++ do { ++ cpu_relax(); ++ } while (unlikely(task_on_rq_migrating(p))); ++ } else { ++ *plock = NULL; ++ return rq; ++ } ++ } ++} ++ ++static inline void ++__task_access_unlock(struct task_struct *p, raw_spinlock_t *lock) ++{ ++ if (NULL != lock) ++ raw_spin_unlock(lock); ++} ++ ++static inline struct rq ++*task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock, ++ unsigned long *flags) ++{ ++ struct rq *rq; ++ for (;;) { ++ rq = task_rq(p); ++ if (p->on_cpu || task_on_rq_queued(p)) { ++ raw_spin_lock_irqsave(&rq->lock, *flags); ++ if (likely((p->on_cpu || task_on_rq_queued(p)) ++ && rq == task_rq(p))) { ++ *plock = &rq->lock; ++ return rq; ++ } ++ raw_spin_unlock_irqrestore(&rq->lock, *flags); ++ } else if (task_on_rq_migrating(p)) { ++ do { ++ cpu_relax(); ++ } while (unlikely(task_on_rq_migrating(p))); ++ } else { ++ raw_spin_lock_irqsave(&p->pi_lock, *flags); ++ if (likely(!p->on_cpu && !p->on_rq && ++ rq == task_rq(p))) { ++ *plock = &p->pi_lock; ++ return rq; ++ } ++ raw_spin_unlock_irqrestore(&p->pi_lock, *flags); ++ } ++ } ++} ++ ++static inline void ++task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock, ++ unsigned long *flags) ++{ ++ raw_spin_unlock_irqrestore(lock, *flags); ++} ++ ++/* ++ * __task_rq_lock - lock the rq @p resides on. ++ */ ++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) ++ __acquires(rq->lock) ++{ ++ struct rq *rq; ++ ++ lockdep_assert_held(&p->pi_lock); ++ ++ for (;;) { ++ rq = task_rq(p); ++ raw_spin_lock(&rq->lock); ++ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) ++ return rq; ++ raw_spin_unlock(&rq->lock); ++ ++ while (unlikely(task_on_rq_migrating(p))) ++ cpu_relax(); ++ } ++} ++ ++/* ++ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on. ++ */ ++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) ++ __acquires(p->pi_lock) ++ __acquires(rq->lock) ++{ ++ struct rq *rq; ++ ++ for (;;) { ++ raw_spin_lock_irqsave(&p->pi_lock, rf->flags); ++ rq = task_rq(p); ++ raw_spin_lock(&rq->lock); ++ /* ++ * move_queued_task() task_rq_lock() ++ * ++ * ACQUIRE (rq->lock) ++ * [S] ->on_rq = MIGRATING [L] rq = task_rq() ++ * WMB (__set_task_cpu()) ACQUIRE (rq->lock); ++ * [S] ->cpu = new_cpu [L] task_rq() ++ * [L] ->on_rq ++ * RELEASE (rq->lock) ++ * ++ * If we observe the old CPU in task_rq_lock(), the acquire of ++ * the old rq->lock will fully serialize against the stores. ++ * ++ * If we observe the new CPU in task_rq_lock(), the address ++ * dependency headed by '[L] rq = task_rq()' and the acquire ++ * will pair with the WMB to ensure we then also see migrating. 
++ */ ++ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { ++ return rq; ++ } ++ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); ++ ++ while (unlikely(task_on_rq_migrating(p))) ++ cpu_relax(); ++ } ++} ++ ++static inline void ++rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) ++ __acquires(rq->lock) ++{ ++ raw_spin_lock_irqsave(&rq->lock, rf->flags); ++} ++ ++static inline void ++rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) ++ __releases(rq->lock) ++{ ++ raw_spin_unlock_irqrestore(&rq->lock, rf->flags); ++} ++ ++/* ++ * RQ-clock updating methods: ++ */ ++ ++static void update_rq_clock_task(struct rq *rq, s64 delta) ++{ ++/* ++ * In theory, the compile should just see 0 here, and optimize out the call ++ * to sched_rt_avg_update. But I don't trust it... ++ */ ++ s64 __maybe_unused steal = 0, irq_delta = 0; ++ ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++ irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; ++ ++ /* ++ * Since irq_time is only updated on {soft,}irq_exit, we might run into ++ * this case when a previous update_rq_clock() happened inside a ++ * {soft,}irq region. ++ * ++ * When this happens, we stop ->clock_task and only update the ++ * prev_irq_time stamp to account for the part that fit, so that a next ++ * update will consume the rest. This ensures ->clock_task is ++ * monotonic. ++ * ++ * It does however cause some slight miss-attribution of {soft,}irq ++ * time, a more accurate solution would be to update the irq_time using ++ * the current rq->clock timestamp, except that would require using ++ * atomic ops. ++ */ ++ if (irq_delta > delta) ++ irq_delta = delta; ++ ++ rq->prev_irq_time += irq_delta; ++ delta -= irq_delta; ++#endif ++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING ++ if (static_key_false((¶virt_steal_rq_enabled))) { ++ steal = paravirt_steal_clock(cpu_of(rq)); ++ steal -= rq->prev_steal_time_rq; ++ ++ if (unlikely(steal > delta)) ++ steal = delta; ++ ++ rq->prev_steal_time_rq += steal; ++ delta -= steal; ++ } ++#endif ++ ++ rq->clock_task += delta; ++ ++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ ++ if ((irq_delta + steal)) ++ update_irq_load_avg(rq, irq_delta + steal); ++#endif ++} ++ ++static inline void update_rq_clock(struct rq *rq) ++{ ++ s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; ++ ++ if (unlikely(delta <= 0)) ++ return; ++ rq->clock += delta; ++ update_rq_clock_task(rq, delta); ++} ++ ++#ifdef CONFIG_NO_HZ_FULL ++/* ++ * Tick may be needed by tasks in the runqueue depending on their policy and ++ * requirements. If tick is needed, lets send the target an IPI to kick it out ++ * of nohz mode if necessary. 
++ */ ++static inline void sched_update_tick_dependency(struct rq *rq) ++{ ++ int cpu = cpu_of(rq); ++ ++ if (!tick_nohz_full_cpu(cpu)) ++ return; ++ ++ if (rq->nr_running < 2) ++ tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); ++ else ++ tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); ++} ++#else /* !CONFIG_NO_HZ_FULL */ ++static inline void sched_update_tick_dependency(struct rq *rq) { } ++#endif ++ ++/* ++ * Add/Remove/Requeue task to/from the runqueue routines ++ * Context: rq->lock ++ */ ++static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags) ++{ ++ lockdep_assert_held(&rq->lock); ++ ++ /*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/ ++ WARN_ONCE(task_rq(p) != rq, "sched: dequeue task reside on cpu%d from cpu%d\n", ++ task_cpu(p), cpu_of(rq)); ++ ++ __SCHED_DEQUEUE_TASK(p, rq, flags, update_sched_rq_watermark(rq)); ++ --rq->nr_running; ++#ifdef CONFIG_SMP ++ if (1 == rq->nr_running) ++ cpumask_clear_cpu(cpu_of(rq), &sched_rq_pending_mask); ++#endif ++ ++ sched_update_tick_dependency(rq); ++} ++ ++static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags) ++{ ++ lockdep_assert_held(&rq->lock); ++ ++ /*printk(KERN_INFO "sched: enqueue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/ ++ WARN_ONCE(task_rq(p) != rq, "sched: enqueue task reside on cpu%d to cpu%d\n", ++ task_cpu(p), cpu_of(rq)); ++ ++ __SCHED_ENQUEUE_TASK(p, rq, flags); ++ update_sched_rq_watermark(rq); ++ ++rq->nr_running; ++#ifdef CONFIG_SMP ++ if (2 == rq->nr_running) ++ cpumask_set_cpu(cpu_of(rq), &sched_rq_pending_mask); ++#endif ++ ++ sched_update_tick_dependency(rq); ++} ++ ++static inline void requeue_task(struct task_struct *p, struct rq *rq) ++{ ++ lockdep_assert_held(&rq->lock); ++ /*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/ ++ WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n", ++ cpu_of(rq), task_cpu(p)); ++ ++ __SCHED_REQUEUE_TASK(p, rq, update_sched_rq_watermark(rq)); ++} ++ ++/* ++ * cmpxchg based fetch_or, macro so it works for different integer types ++ */ ++#define fetch_or(ptr, mask) \ ++ ({ \ ++ typeof(ptr) _ptr = (ptr); \ ++ typeof(mask) _mask = (mask); \ ++ typeof(*_ptr) _old, _val = *_ptr; \ ++ \ ++ for (;;) { \ ++ _old = cmpxchg(_ptr, _val, _val | _mask); \ ++ if (_old == _val) \ ++ break; \ ++ _val = _old; \ ++ } \ ++ _old; \ ++}) ++ ++#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG) ++/* ++ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG, ++ * this avoids any races wrt polling state changes and thereby avoids ++ * spurious IPIs. ++ */ ++static bool set_nr_and_not_polling(struct task_struct *p) ++{ ++ struct thread_info *ti = task_thread_info(p); ++ return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG); ++} ++ ++/* ++ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set. ++ * ++ * If this returns true, then the idle task promises to call ++ * sched_ttwu_pending() and reschedule soon. 
++ */ ++static bool set_nr_if_polling(struct task_struct *p) ++{ ++ struct thread_info *ti = task_thread_info(p); ++ typeof(ti->flags) old, val = READ_ONCE(ti->flags); ++ ++ for (;;) { ++ if (!(val & _TIF_POLLING_NRFLAG)) ++ return false; ++ if (val & _TIF_NEED_RESCHED) ++ return true; ++ old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED); ++ if (old == val) ++ break; ++ val = old; ++ } ++ return true; ++} ++ ++#else ++static bool set_nr_and_not_polling(struct task_struct *p) ++{ ++ set_tsk_need_resched(p); ++ return true; ++} ++ ++#ifdef CONFIG_SMP ++static bool set_nr_if_polling(struct task_struct *p) ++{ ++ return false; ++} ++#endif ++#endif ++ ++static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task) ++{ ++ struct wake_q_node *node = &task->wake_q; ++ ++ /* ++ * Atomically grab the task, if ->wake_q is !nil already it means ++ * it's already queued (either by us or someone else) and will get the ++ * wakeup due to that. ++ * ++ * In order to ensure that a pending wakeup will observe our pending ++ * state, even in the failed case, an explicit smp_mb() must be used. ++ */ ++ smp_mb__before_atomic(); ++ if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))) ++ return false; ++ ++ /* ++ * The head is context local, there can be no concurrency. ++ */ ++ *head->lastp = node; ++ head->lastp = &node->next; ++ return true; ++} ++ ++/** ++ * wake_q_add() - queue a wakeup for 'later' waking. ++ * @head: the wake_q_head to add @task to ++ * @task: the task to queue for 'later' wakeup ++ * ++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the ++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come ++ * instantly. ++ * ++ * This function must be used as-if it were wake_up_process(); IOW the task ++ * must be ready to be woken at this location. ++ */ ++void wake_q_add(struct wake_q_head *head, struct task_struct *task) ++{ ++ if (__wake_q_add(head, task)) ++ get_task_struct(task); ++} ++ ++/** ++ * wake_q_add_safe() - safely queue a wakeup for 'later' waking. ++ * @head: the wake_q_head to add @task to ++ * @task: the task to queue for 'later' wakeup ++ * ++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the ++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come ++ * instantly. ++ * ++ * This function must be used as-if it were wake_up_process(); IOW the task ++ * must be ready to be woken at this location. ++ * ++ * This function is essentially a task-safe equivalent to wake_q_add(). Callers ++ * that already hold reference to @task can call the 'safe' version and trust ++ * wake_q to do the right thing depending whether or not the @task is already ++ * queued for wakeup. ++ */ ++void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task) ++{ ++ if (!__wake_q_add(head, task)) ++ put_task_struct(task); ++} ++ ++void wake_up_q(struct wake_q_head *head) ++{ ++ struct wake_q_node *node = head->first; ++ ++ while (node != WAKE_Q_TAIL) { ++ struct task_struct *task; ++ ++ task = container_of(node, struct task_struct, wake_q); ++ BUG_ON(!task); ++ /* task can safely be re-inserted now: */ ++ node = node->next; ++ task->wake_q.next = NULL; ++ ++ /* ++ * wake_up_process() executes a full barrier, which pairs with ++ * the queueing in wake_q_add() so as not to miss wakeups. ++ */ ++ wake_up_process(task); ++ put_task_struct(task); ++ } ++} ++ ++/* ++ * resched_curr - mark rq's current task 'to be rescheduled now'. 
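The wake_q helpers above follow the usual mainline pattern of collecting wakeups under a lock and issuing them after the lock is dropped; a minimal caller-side sketch, assuming the standard DEFINE_WAKE_Q initializer from <linux/sched/wake_q.h>:

static void demo_wake_one(struct task_struct *task)
{
	DEFINE_WAKE_Q(wake_q);

	/* Typically done while holding a spinlock: only queue the wakeup. */
	wake_q_add(&wake_q, task);

	/* After the lock is dropped: perform the deferred wakeup(s). */
	wake_up_q(&wake_q);
}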
++ * ++ * On UP this means the setting of the need_resched flag, on SMP it ++ * might also involve a cross-CPU call to trigger the scheduler on ++ * the target CPU. ++ */ ++void resched_curr(struct rq *rq) ++{ ++ struct task_struct *curr = rq->curr; ++ int cpu; ++ ++ lockdep_assert_held(&rq->lock); ++ ++ if (test_tsk_need_resched(curr)) ++ return; ++ ++ cpu = cpu_of(rq); ++ if (cpu == smp_processor_id()) { ++ set_tsk_need_resched(curr); ++ set_preempt_need_resched(); ++ return; ++ } ++ ++ if (set_nr_and_not_polling(curr)) ++ smp_send_reschedule(cpu); ++ else ++ trace_sched_wake_idle_without_ipi(cpu); ++} ++ ++void resched_cpu(int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ if (cpu_online(cpu) || cpu == smp_processor_id()) ++ resched_curr(cpu_rq(cpu)); ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++} ++ ++#ifdef CONFIG_SMP ++#ifdef CONFIG_NO_HZ_COMMON ++void nohz_balance_enter_idle(int cpu) {} ++ ++void select_nohz_load_balancer(int stop_tick) {} ++ ++void set_cpu_sd_state_idle(void) {} ++ ++/* ++ * In the semi idle case, use the nearest busy CPU for migrating timers ++ * from an idle CPU. This is good for power-savings. ++ * ++ * We don't do similar optimization for completely idle system, as ++ * selecting an idle CPU will add more delays to the timers than intended ++ * (as that CPU's timer base may not be uptodate wrt jiffies etc). ++ */ ++int get_nohz_timer_target(void) ++{ ++ int i, cpu = smp_processor_id(), default_cpu = -1; ++ struct cpumask *mask; ++ ++ if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) { ++ if (!idle_cpu(cpu)) ++ return cpu; ++ default_cpu = cpu; ++ } ++ ++ for (mask = per_cpu(sched_cpu_affinity_masks, cpu) + 1; ++ mask < per_cpu(sched_cpu_affinity_end_mask, cpu); mask++) ++ for_each_cpu_and(i, mask, housekeeping_cpumask(HK_FLAG_TIMER)) ++ if (!idle_cpu(i)) ++ return i; ++ ++ if (default_cpu == -1) ++ default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER); ++ cpu = default_cpu; ++ ++ return cpu; ++} ++ ++/* ++ * When add_timer_on() enqueues a timer into the timer wheel of an ++ * idle CPU then this timer might expire before the next timer event ++ * which is scheduled to wake up that CPU. In case of a completely ++ * idle system the next event might even be infinite time into the ++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and ++ * leaves the inner idle loop so the newly added timer is taken into ++ * account when the CPU goes back to idle and evaluates the timer ++ * wheel for the next timer event. ++ */ ++static inline void wake_up_idle_cpu(int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++ if (cpu == smp_processor_id()) ++ return; ++ ++ if (set_nr_and_not_polling(rq->idle)) ++ smp_send_reschedule(cpu); ++ else ++ trace_sched_wake_idle_without_ipi(cpu); ++} ++ ++static inline bool wake_up_full_nohz_cpu(int cpu) ++{ ++ /* ++ * We just need the target to call irq_exit() and re-evaluate ++ * the next tick. The nohz full kick at least implies that. ++ * If needed we can still optimize that later with an ++ * empty IRQ. ++ */ ++ if (cpu_is_offline(cpu)) ++ return true; /* Don't try to wake offline CPUs. 
*/ ++ if (tick_nohz_full_cpu(cpu)) { ++ if (cpu != smp_processor_id() || ++ tick_nohz_tick_stopped()) ++ tick_nohz_full_kick_cpu(cpu); ++ return true; ++ } ++ ++ return false; ++} ++ ++void wake_up_nohz_cpu(int cpu) ++{ ++ if (!wake_up_full_nohz_cpu(cpu)) ++ wake_up_idle_cpu(cpu); ++} ++ ++static void nohz_csd_func(void *info) ++{ ++ struct rq *rq = info; ++ int cpu = cpu_of(rq); ++ unsigned int flags; ++ ++ /* ++ * Release the rq::nohz_csd. ++ */ ++ flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu)); ++ WARN_ON(!(flags & NOHZ_KICK_MASK)); ++ ++ rq->idle_balance = idle_cpu(cpu); ++ if (rq->idle_balance && !need_resched()) { ++ rq->nohz_idle_balance = flags; ++ raise_softirq_irqoff(SCHED_SOFTIRQ); ++ } ++} ++ ++#endif /* CONFIG_NO_HZ_COMMON */ ++#endif /* CONFIG_SMP */ ++ ++static inline void check_preempt_curr(struct rq *rq) ++{ ++ if (sched_rq_first_task(rq) != rq->curr) ++ resched_curr(rq); ++} ++ ++#ifdef CONFIG_SCHED_HRTICK ++/* ++ * Use HR-timers to deliver accurate preemption points. ++ */ ++ ++static void hrtick_clear(struct rq *rq) ++{ ++ if (hrtimer_active(&rq->hrtick_timer)) ++ hrtimer_cancel(&rq->hrtick_timer); ++} ++ ++/* ++ * High-resolution timer tick. ++ * Runs from hardirq context with interrupts disabled. ++ */ ++static enum hrtimer_restart hrtick(struct hrtimer *timer) ++{ ++ struct rq *rq = container_of(timer, struct rq, hrtick_timer); ++ struct task_struct *p; ++ ++ WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); ++ ++ raw_spin_lock(&rq->lock); ++ p = rq->curr; ++ p->time_slice = 0; ++ resched_curr(rq); ++ raw_spin_unlock(&rq->lock); ++ ++ return HRTIMER_NORESTART; ++} ++ ++/* ++ * Use hrtick when: ++ * - enabled by features ++ * - hrtimer is actually high res ++ */ ++static inline int hrtick_enabled(struct rq *rq) ++{ ++ /** ++ * Alt schedule FW doesn't support sched_feat yet ++ if (!sched_feat(HRTICK)) ++ return 0; ++ */ ++ if (!cpu_active(cpu_of(rq))) ++ return 0; ++ return hrtimer_is_hres_active(&rq->hrtick_timer); ++} ++ ++#ifdef CONFIG_SMP ++ ++static void __hrtick_restart(struct rq *rq) ++{ ++ struct hrtimer *timer = &rq->hrtick_timer; ++ ktime_t time = rq->hrtick_time; ++ ++ hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD); ++} ++ ++/* ++ * called from hardirq (IPI) context ++ */ ++static void __hrtick_start(void *arg) ++{ ++ struct rq *rq = arg; ++ ++ raw_spin_lock(&rq->lock); ++ __hrtick_restart(rq); ++ raw_spin_unlock(&rq->lock); ++} ++ ++/* ++ * Called to set the hrtick timer state. ++ * ++ * called with rq->lock held and irqs disabled ++ */ ++void hrtick_start(struct rq *rq, u64 delay) ++{ ++ struct hrtimer *timer = &rq->hrtick_timer; ++ s64 delta; ++ ++ /* ++ * Don't schedule slices shorter than 10000ns, that just ++ * doesn't make sense and can cause timer DoS. ++ */ ++ delta = max_t(s64, delay, 10000LL); ++ ++ rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta); ++ ++ if (rq == this_rq()) ++ __hrtick_restart(rq); ++ else ++ smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); ++} ++ ++#else ++/* ++ * Called to set the hrtick timer state. ++ * ++ * called with rq->lock held and irqs disabled ++ */ ++void hrtick_start(struct rq *rq, u64 delay) ++{ ++ /* ++ * Don't schedule slices shorter than 10000ns, that just ++ * doesn't make sense. Rely on vruntime for fairness. 
++ */ ++ delay = max_t(u64, delay, 10000LL); ++ hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), ++ HRTIMER_MODE_REL_PINNED_HARD); ++} ++#endif /* CONFIG_SMP */ ++ ++static void hrtick_rq_init(struct rq *rq) ++{ ++#ifdef CONFIG_SMP ++ INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq); ++#endif ++ ++ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); ++ rq->hrtick_timer.function = hrtick; ++} ++#else /* CONFIG_SCHED_HRTICK */ ++static inline int hrtick_enabled(struct rq *rq) ++{ ++ return 0; ++} ++ ++static inline void hrtick_clear(struct rq *rq) ++{ ++} ++ ++static inline void hrtick_rq_init(struct rq *rq) ++{ ++} ++#endif /* CONFIG_SCHED_HRTICK */ ++ ++/* ++ * Calculate the current priority, i.e. the priority ++ * taken into account by the scheduler. This value might ++ * be boosted by RT tasks as it will be RT if the task got ++ * RT-boosted. If not then it returns p->normal_prio. ++ */ ++static int effective_prio(struct task_struct *p) ++{ ++ p->normal_prio = normal_prio(p); ++ /* ++ * If we are RT tasks or we were boosted to RT priority, ++ * keep the priority unchanged. Otherwise, update priority ++ * to the normal priority: ++ */ ++ if (!rt_prio(p->prio)) ++ return p->normal_prio; ++ return p->prio; ++} ++ ++/* ++ * activate_task - move a task to the runqueue. ++ * ++ * Context: rq->lock ++ */ ++static void activate_task(struct task_struct *p, struct rq *rq) ++{ ++ enqueue_task(p, rq, ENQUEUE_WAKEUP); ++ p->on_rq = TASK_ON_RQ_QUEUED; ++ ++ /* ++ * If in_iowait is set, the code below may not trigger any cpufreq ++ * utilization updates, so do it here explicitly with the IOWAIT flag ++ * passed. ++ */ ++ cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT * p->in_iowait); ++} ++ ++/* ++ * deactivate_task - remove a task from the runqueue. ++ * ++ * Context: rq->lock ++ */ ++static inline void deactivate_task(struct task_struct *p, struct rq *rq) ++{ ++ dequeue_task(p, rq, DEQUEUE_SLEEP); ++ p->on_rq = 0; ++ cpufreq_update_util(rq, 0); ++} ++ ++static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) ++{ ++#ifdef CONFIG_SMP ++ /* ++ * After ->cpu is set up to a new value, task_access_lock(p, ...) can be ++ * successfully executed on another CPU. We must ensure that updates of ++ * per-task data have been completed by this moment. ++ */ ++ smp_wmb(); ++ ++#ifdef CONFIG_THREAD_INFO_IN_TASK ++ WRITE_ONCE(p->cpu, cpu); ++#else ++ WRITE_ONCE(task_thread_info(p)->cpu, cpu); ++#endif ++#endif ++} ++ ++static inline bool is_migration_disabled(struct task_struct *p) ++{ ++#ifdef CONFIG_SMP ++ return p->migration_disabled; ++#else ++ return false; ++#endif ++} ++ ++#define SCA_CHECK 0x01 ++ ++#ifdef CONFIG_SMP ++ ++void set_task_cpu(struct task_struct *p, unsigned int new_cpu) ++{ ++#ifdef CONFIG_SCHED_DEBUG ++ /* ++ * We should never call set_task_cpu() on a blocked task, ++ * ttwu() will sort out the placement. ++ */ ++ WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && ++ !p->on_rq); ++#ifdef CONFIG_LOCKDEP ++ /* ++ * The caller should hold either p->pi_lock or rq->lock, when changing ++ * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. ++ * ++ * sched_move_task() holds both and thus holding either pins the cgroup, ++ * see task_group(). ++ */ ++ WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || ++ lockdep_is_held(&task_rq(p)->lock))); ++#endif ++ /* ++ * Clearly, migrating tasks to offline CPUs is a fairly daft thing. 
++ */ ++ WARN_ON_ONCE(!cpu_online(new_cpu)); ++ ++ WARN_ON_ONCE(is_migration_disabled(p)); ++#endif ++ if (task_cpu(p) == new_cpu) ++ return; ++ trace_sched_migrate_task(p, new_cpu); ++ rseq_migrate(p); ++ perf_event_task_migrate(p); ++ ++ __set_task_cpu(p, new_cpu); ++} ++ ++#define MDF_FORCE_ENABLED 0x80 ++ ++static void ++__do_set_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask) ++{ ++ /* ++ * This here violates the locking rules for affinity, since we're only ++ * supposed to change these variables while holding both rq->lock and ++ * p->pi_lock. ++ * ++ * HOWEVER, it magically works, because ttwu() is the only code that ++ * accesses these variables under p->pi_lock and only does so after ++ * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule() ++ * before finish_task(). ++ * ++ * XXX do further audits, this smells like something putrid. ++ */ ++ SCHED_WARN_ON(!p->on_cpu); ++ p->cpus_ptr = new_mask; ++} ++ ++void migrate_disable(void) ++{ ++ struct task_struct *p = current; ++ int cpu; ++ ++ if (p->migration_disabled) { ++ p->migration_disabled++; ++ return; ++ } ++ ++ preempt_disable(); ++ cpu = smp_processor_id(); ++ if (cpumask_test_cpu(cpu, &p->cpus_mask)) { ++ cpu_rq(cpu)->nr_pinned++; ++ p->migration_disabled = 1; ++ p->migration_flags &= ~MDF_FORCE_ENABLED; ++ ++ /* ++ * Violates locking rules! see comment in __do_set_cpus_ptr(). ++ */ ++ if (p->cpus_ptr == &p->cpus_mask) ++ __do_set_cpus_ptr(p, cpumask_of(cpu)); ++ } ++ preempt_enable(); ++} ++EXPORT_SYMBOL_GPL(migrate_disable); ++ ++void migrate_enable(void) ++{ ++ struct task_struct *p = current; ++ ++ if (0 == p->migration_disabled) ++ return; ++ ++ if (p->migration_disabled > 1) { ++ p->migration_disabled--; ++ return; ++ } ++ ++ /* ++ * Ensure stop_task runs either before or after this, and that ++ * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule(). ++ */ ++ preempt_disable(); ++ /* ++ * Assumption: current should be running on allowed cpu ++ */ ++ WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &p->cpus_mask)); ++ if (p->cpus_ptr != &p->cpus_mask) ++ __do_set_cpus_ptr(p, &p->cpus_mask); ++ /* ++ * Mustn't clear migration_disabled() until cpus_ptr points back at the ++ * regular cpus_mask, otherwise things that race (eg. ++ * select_fallback_rq) get confused. ++ */ ++ barrier(); ++ p->migration_disabled = 0; ++ this_rq()->nr_pinned--; ++ preempt_enable(); ++} ++EXPORT_SYMBOL_GPL(migrate_enable); ++ ++static inline bool rq_has_pinned_tasks(struct rq *rq) ++{ ++ return rq->nr_pinned; ++} ++ ++/* ++ * Per-CPU kthreads are allowed to run on !active && online CPUs, see ++ * __set_cpus_allowed_ptr() and select_fallback_rq(). ++ */ ++static inline bool is_cpu_allowed(struct task_struct *p, int cpu) ++{ ++ /* When not in the task's cpumask, no point in looking further. */ ++ if (!cpumask_test_cpu(cpu, p->cpus_ptr)) ++ return false; ++ ++ /* migrate_disabled() must be allowed to finish. */ ++ if (is_migration_disabled(p)) ++ return cpu_online(cpu); ++ ++ /* Non kernel threads are not allowed during either online or offline. */ ++ if (!(p->flags & PF_KTHREAD)) ++ return cpu_active(cpu); ++ ++ /* KTHREAD_IS_PER_CPU is always allowed. */ ++ if (kthread_is_per_cpu(p)) ++ return cpu_online(cpu); ++ ++ /* Regular kernel threads don't get to stay during offline. */ ++ if (cpu_rq(cpu)->balance_push) ++ return false; ++ ++ /* But are allowed during online. 
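migrate_disable()/migrate_enable() above pin the current task to its CPU while leaving preemption enabled, and the calls nest. A small illustrative sketch, assuming a hypothetical per-CPU counter named example_counter:

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, example_counter);

static void example_touch_per_cpu_data(void)
{
        migrate_disable();      /* preemption stays enabled ... */
        /* ... but the task cannot move, so both accesses hit the same CPU */
        this_cpu_write(example_counter, this_cpu_read(example_counter) + 1);
        migrate_enable();
}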
*/ ++ return cpu_online(cpu); ++} ++ ++/* ++ * This is how migration works: ++ * ++ * 1) we invoke migration_cpu_stop() on the target CPU using ++ * stop_one_cpu(). ++ * 2) stopper starts to run (implicitly forcing the migrated thread ++ * off the CPU) ++ * 3) it checks whether the migrated task is still in the wrong runqueue. ++ * 4) if it's in the wrong runqueue then the migration thread removes ++ * it and puts it into the right queue. ++ * 5) stopper completes and stop_one_cpu() returns and the migration ++ * is done. ++ */ ++ ++/* ++ * move_queued_task - move a queued task to new rq. ++ * ++ * Returns (locked) new rq. Old rq's lock is released. ++ */ ++static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int ++ new_cpu) ++{ ++ lockdep_assert_held(&rq->lock); ++ ++ WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING); ++ dequeue_task(p, rq, 0); ++ set_task_cpu(p, new_cpu); ++ raw_spin_unlock(&rq->lock); ++ ++ rq = cpu_rq(new_cpu); ++ ++ raw_spin_lock(&rq->lock); ++ BUG_ON(task_cpu(p) != new_cpu); ++ enqueue_task(p, rq, 0); ++ p->on_rq = TASK_ON_RQ_QUEUED; ++ check_preempt_curr(rq); ++ ++ return rq; ++} ++ ++struct migration_arg { ++ struct task_struct *task; ++ int dest_cpu; ++}; ++ ++/* ++ * Move (not current) task off this CPU, onto the destination CPU. We're doing ++ * this because either it can't run here any more (set_cpus_allowed() ++ * away from this CPU, or CPU going down), or because we're ++ * attempting to rebalance this task on exec (sched_exec). ++ * ++ * So we race with normal scheduler movements, but that's OK, as long ++ * as the task is no longer on this CPU. ++ */ ++static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int ++ dest_cpu) ++{ ++ /* Affinity changed (again). */ ++ if (!is_cpu_allowed(p, dest_cpu)) ++ return rq; ++ ++ update_rq_clock(rq); ++ return move_queued_task(rq, p, dest_cpu); ++} ++ ++/* ++ * migration_cpu_stop - this will be executed by a highprio stopper thread ++ * and performs thread migration by bumping thread off CPU then ++ * 'pushing' onto another runqueue. ++ */ ++static int migration_cpu_stop(void *data) ++{ ++ struct migration_arg *arg = data; ++ struct task_struct *p = arg->task; ++ struct rq *rq = this_rq(); ++ ++ /* ++ * The original target CPU might have gone down and we might ++ * be on another CPU but it doesn't matter. ++ */ ++ local_irq_disable(); ++ /* ++ * We need to explicitly wake pending tasks before running ++ * __migrate_task() such that we will not miss enforcing cpus_ptr ++ * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. ++ */ ++ flush_smp_call_function_from_idle(); ++ ++ raw_spin_lock(&p->pi_lock); ++ raw_spin_lock(&rq->lock); ++ /* ++ * If task_rq(p) != rq, it cannot be migrated here, because we're ++ * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because ++ * we're holding p->pi_lock. 
++ */ ++ if (task_rq(p) == rq && task_on_rq_queued(p)) ++ rq = __migrate_task(rq, p, arg->dest_cpu); ++ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock(&p->pi_lock); ++ ++ local_irq_enable(); ++ return 0; ++} ++ ++static inline void ++set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) ++{ ++ cpumask_copy(&p->cpus_mask, new_mask); ++ p->nr_cpus_allowed = cpumask_weight(new_mask); ++} ++ ++static void ++__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) ++{ ++ lockdep_assert_held(&p->pi_lock); ++ set_cpus_allowed_common(p, new_mask); ++} ++ ++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) ++{ ++ __do_set_cpus_allowed(p, new_mask); ++} ++ ++#endif ++ ++/** ++ * task_curr - is this task currently executing on a CPU? ++ * @p: the task in question. ++ * ++ * Return: 1 if the task is currently executing. 0 otherwise. ++ */ ++inline int task_curr(const struct task_struct *p) ++{ ++ return cpu_curr(task_cpu(p)) == p; ++} ++ ++#ifdef CONFIG_SMP ++/* ++ * wait_task_inactive - wait for a thread to unschedule. ++ * ++ * If @match_state is nonzero, it's the @p->state value just checked and ++ * not expected to change. If it changes, i.e. @p might have woken up, ++ * then return zero. When we succeed in waiting for @p to be off its CPU, ++ * we return a positive number (its total switch count). If a second call ++ * a short while later returns the same number, the caller can be sure that ++ * @p has remained unscheduled the whole time. ++ * ++ * The caller must ensure that the task *will* unschedule sometime soon, ++ * else this function might spin for a *long* time. This function can't ++ * be called with interrupts off, or it may introduce deadlock with ++ * smp_call_function() if an IPI is sent by the same process we are ++ * waiting to become inactive. ++ */ ++unsigned long wait_task_inactive(struct task_struct *p, long match_state) ++{ ++ unsigned long flags; ++ bool running, on_rq; ++ unsigned long ncsw; ++ struct rq *rq; ++ raw_spinlock_t *lock; ++ ++ for (;;) { ++ rq = task_rq(p); ++ ++ /* ++ * If the task is actively running on another CPU ++ * still, just relax and busy-wait without holding ++ * any locks. ++ * ++ * NOTE! Since we don't hold any locks, it's not ++ * even sure that "rq" stays as the right runqueue! ++ * But we don't care, since this will return false ++ * if the runqueue has changed and p is actually now ++ * running somewhere else! ++ */ ++ while (task_running(p) && p == rq->curr) { ++ if (match_state && unlikely(p->state != match_state)) ++ return 0; ++ cpu_relax(); ++ } ++ ++ /* ++ * Ok, time to look more closely! We need the rq ++ * lock now, to be *sure*. If we're wrong, we'll ++ * just go back and repeat. ++ */ ++ task_access_lock_irqsave(p, &lock, &flags); ++ trace_sched_wait_task(p); ++ running = task_running(p); ++ on_rq = p->on_rq; ++ ncsw = 0; ++ if (!match_state || p->state == match_state) ++ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ ++ task_access_unlock_irqrestore(p, lock, &flags); ++ ++ /* ++ * If it changed from the expected state, bail out now. ++ */ ++ if (unlikely(!ncsw)) ++ break; ++ ++ /* ++ * Was it really running after all now that we ++ * checked with the proper locks actually held? ++ * ++ * Oops. Go back and try again.. ++ */ ++ if (unlikely(running)) { ++ cpu_relax(); ++ continue; ++ } ++ ++ /* ++ * It's not enough that it's not actively running, ++ * it must be off the runqueue _entirely_, and not ++ * preempted! 
++ * ++ * So if it was still runnable (but just not actively ++ * running right now), it's preempted, and we should ++ * yield - it could be a while. ++ */ ++ if (unlikely(on_rq)) { ++ ktime_t to = NSEC_PER_SEC / HZ; ++ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ schedule_hrtimeout(&to, HRTIMER_MODE_REL); ++ continue; ++ } ++ ++ /* ++ * Ahh, all good. It wasn't running, and it wasn't ++ * runnable, which means that it will never become ++ * running in the future either. We're all done! ++ */ ++ break; ++ } ++ ++ return ncsw; ++} ++ ++/*** ++ * kick_process - kick a running thread to enter/exit the kernel ++ * @p: the to-be-kicked thread ++ * ++ * Cause a process which is running on another CPU to enter ++ * kernel-mode, without any delay. (to get signals handled.) ++ * ++ * NOTE: this function doesn't have to take the runqueue lock, ++ * because all it wants to ensure is that the remote task enters ++ * the kernel. If the IPI races and the task has been migrated ++ * to another CPU then no harm is done and the purpose has been ++ * achieved as well. ++ */ ++void kick_process(struct task_struct *p) ++{ ++ int cpu; ++ ++ preempt_disable(); ++ cpu = task_cpu(p); ++ if ((cpu != smp_processor_id()) && task_curr(p)) ++ smp_send_reschedule(cpu); ++ preempt_enable(); ++} ++EXPORT_SYMBOL_GPL(kick_process); ++ ++/* ++ * ->cpus_ptr is protected by both rq->lock and p->pi_lock ++ * ++ * A few notes on cpu_active vs cpu_online: ++ * ++ * - cpu_active must be a subset of cpu_online ++ * ++ * - on CPU-up we allow per-CPU kthreads on the online && !active CPU, ++ * see __set_cpus_allowed_ptr(). At this point the newly online ++ * CPU isn't yet part of the sched domains, and balancing will not ++ * see it. ++ * ++ * - on cpu-down we clear cpu_active() to mask the sched domains and ++ * avoid the load balancer to place new tasks on the to be removed ++ * CPU. Existing tasks will remain running there and will be taken ++ * off. ++ * ++ * This means that fallback selection must not select !active CPUs. ++ * And can assume that any active CPU must be online. Conversely ++ * select_task_rq() below may allow selection of !active CPUs in order ++ * to satisfy the above rules. ++ */ ++static int select_fallback_rq(int cpu, struct task_struct *p) ++{ ++ int nid = cpu_to_node(cpu); ++ const struct cpumask *nodemask = NULL; ++ enum { cpuset, possible, fail } state = cpuset; ++ int dest_cpu; ++ ++ /* ++ * If the node that the CPU is on has been offlined, cpu_to_node() ++ * will return -1. There is no CPU on the node, and we should ++ * select the CPU on the other node. ++ */ ++ if (nid != -1) { ++ nodemask = cpumask_of_node(nid); ++ ++ /* Look for allowed, online CPU in same node. */ ++ for_each_cpu(dest_cpu, nodemask) { ++ if (!cpu_active(dest_cpu)) ++ continue; ++ if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) ++ return dest_cpu; ++ } ++ } ++ ++ for (;;) { ++ /* Any allowed, online CPU? */ ++ for_each_cpu(dest_cpu, p->cpus_ptr) { ++ if (!is_cpu_allowed(p, dest_cpu)) ++ continue; ++ goto out; ++ } ++ ++ /* No more Mr. Nice Guy. */ ++ switch (state) { ++ case cpuset: ++ if (IS_ENABLED(CONFIG_CPUSETS)) { ++ cpuset_cpus_allowed_fallback(p); ++ state = possible; ++ break; ++ } ++ fallthrough; ++ case possible: ++ /* ++ * XXX When called from select_task_rq() we only ++ * hold p->pi_lock and again violate locking order. ++ * ++ * More yuck to audit. 
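wait_task_inactive() above is used by callers such as ptrace that must be sure a peer task is completely off its CPU and off the runqueue before touching its state. A hedged sketch of the calling pattern; example_wait_off_cpu is an invented wrapper:

#include <linux/errno.h>
#include <linux/sched.h>

static int example_wait_off_cpu(struct task_struct *child)
{
        /*
         * Succeeds once @child is neither running nor queued while still in
         * __TASK_TRACED; returns -ESRCH if it changed state in the meantime,
         * in which case the caller must re-validate and retry.
         */
        if (!wait_task_inactive(child, __TASK_TRACED))
                return -ESRCH;
        return 0;
}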
++ */ ++ do_set_cpus_allowed(p, cpu_possible_mask); ++ state = fail; ++ break; ++ ++ case fail: ++ BUG(); ++ break; ++ } ++ } ++ ++out: ++ if (state != cpuset) { ++ /* ++ * Don't tell them about moving exiting tasks or ++ * kernel threads (both mm NULL), since they never ++ * leave kernel. ++ */ ++ if (p->mm && printk_ratelimit()) { ++ printk_deferred("process %d (%s) no longer affine to cpu%d\n", ++ task_pid_nr(p), p->comm, cpu); ++ } ++ } ++ ++ return dest_cpu; ++} ++ ++static inline int select_task_rq(struct task_struct *p, struct rq *rq) ++{ ++ cpumask_t chk_mask, tmp; ++ ++ if (unlikely(!cpumask_and(&chk_mask, p->cpus_ptr, cpu_active_mask))) ++ return select_fallback_rq(task_cpu(p), p); ++ ++ if ( ++#ifdef CONFIG_SCHED_SMT ++ cpumask_and(&tmp, &chk_mask, &sched_sg_idle_mask) || ++#endif ++ cpumask_and(&tmp, &chk_mask, &sched_rq_watermark[IDLE_WM]) || ++ cpumask_and(&tmp, &chk_mask, ++ &sched_rq_watermark[task_sched_prio(p, rq) + 1])) ++ return best_mask_cpu(task_cpu(p), &tmp); ++ ++ return best_mask_cpu(task_cpu(p), &chk_mask); ++} ++ ++void sched_set_stop_task(int cpu, struct task_struct *stop) ++{ ++ static struct lock_class_key stop_pi_lock; ++ struct sched_param stop_param = { .sched_priority = STOP_PRIO }; ++ struct sched_param start_param = { .sched_priority = 0 }; ++ struct task_struct *old_stop = cpu_rq(cpu)->stop; ++ ++ if (stop) { ++ /* ++ * Make it appear like a SCHED_FIFO task, its something ++ * userspace knows about and won't get confused about. ++ * ++ * Also, it will make PI more or less work without too ++ * much confusion -- but then, stop work should not ++ * rely on PI working anyway. ++ */ ++ sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param); ++ ++ /* ++ * The PI code calls rt_mutex_setprio() with ->pi_lock held to ++ * adjust the effective priority of a task. As a result, ++ * rt_mutex_setprio() can trigger (RT) balancing operations, ++ * which can then trigger wakeups of the stop thread to push ++ * around the current task. ++ * ++ * The stop task itself will never be part of the PI-chain, it ++ * never blocks, therefore that ->pi_lock recursion is safe. ++ * Tell lockdep about this by placing the stop->pi_lock in its ++ * own class. ++ */ ++ lockdep_set_class(&stop->pi_lock, &stop_pi_lock); ++ } ++ ++ cpu_rq(cpu)->stop = stop; ++ ++ if (old_stop) { ++ /* ++ * Reset it back to a normal scheduling policy so that ++ * it can die in pieces. ++ */ ++ sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param); ++ } ++} ++ ++/* ++ * Change a given task's CPU affinity. Migrate the thread to a ++ * proper CPU and schedule it away if the CPU it's executing on ++ * is removed from the allowed bitmask. ++ * ++ * NOTE: the caller must have a valid reference to the task, the ++ * task must not exit() & deallocate itself prematurely. The ++ * call is not atomic; no spinlocks may be held. ++ */ ++static int __set_cpus_allowed_ptr(struct task_struct *p, ++ const struct cpumask *new_mask, ++ u32 flags) ++{ ++ const struct cpumask *cpu_valid_mask = cpu_active_mask; ++ int dest_cpu; ++ unsigned long irq_flags; ++ struct rq *rq; ++ raw_spinlock_t *lock; ++ int ret = 0; ++ ++ raw_spin_lock_irqsave(&p->pi_lock, irq_flags); ++ rq = __task_access_lock(p, &lock); ++ ++ if (p->flags & PF_KTHREAD || is_migration_disabled(p)) { ++ /* ++ * Kernel threads are allowed on online && !active CPUs, ++ * however, during cpu-hot-unplug, even these might get pushed ++ * away if not KTHREAD_IS_PER_CPU. 
++ * ++ * Specifically, migration_disabled() tasks must not fail the ++ * cpumask_any_and_distribute() pick below, esp. so on ++ * SCA_MIGRATE_ENABLE, otherwise we'll not call ++ * set_cpus_allowed_common() and actually reset p->cpus_ptr. ++ */ ++ cpu_valid_mask = cpu_online_mask; ++ } ++ ++ /* ++ * Must re-check here, to close a race against __kthread_bind(), ++ * sched_setaffinity() is not guaranteed to observe the flag. ++ */ ++ if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ if (cpumask_equal(&p->cpus_mask, new_mask)) ++ goto out; ++ ++ dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); ++ if (dest_cpu >= nr_cpu_ids) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ __do_set_cpus_allowed(p, new_mask); ++ ++ /* Can the task run on the task's current CPU? If so, we're done */ ++ if (cpumask_test_cpu(task_cpu(p), new_mask)) ++ goto out; ++ ++ if (p->migration_disabled) { ++ if (likely(p->cpus_ptr != &p->cpus_mask)) ++ __do_set_cpus_ptr(p, &p->cpus_mask); ++ p->migration_disabled = 0; ++ p->migration_flags |= MDF_FORCE_ENABLED; ++ /* When p is migrate_disabled, rq->lock should be held */ ++ rq->nr_pinned--; ++ } ++ ++ if (task_running(p) || p->state == TASK_WAKING) { ++ struct migration_arg arg = { p, dest_cpu }; ++ ++ /* Need help from migration thread: drop lock and wait. */ ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags); ++ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); ++ return 0; ++ } ++ if (task_on_rq_queued(p)) { ++ /* ++ * OK, since we're going to drop the lock immediately ++ * afterwards anyway. ++ */ ++ update_rq_clock(rq); ++ rq = move_queued_task(rq, p, dest_cpu); ++ lock = &rq->lock; ++ } ++ ++out: ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags); ++ ++ return ret; ++} ++ ++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) ++{ ++ return __set_cpus_allowed_ptr(p, new_mask, 0); ++} ++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); ++ ++#else /* CONFIG_SMP */ ++ ++static inline int select_task_rq(struct task_struct *p, struct rq *rq) ++{ ++ return 0; ++} ++ ++static inline int ++__set_cpus_allowed_ptr(struct task_struct *p, ++ const struct cpumask *new_mask, ++ u32 flags) ++{ ++ return set_cpus_allowed_ptr(p, new_mask); ++} ++ ++static inline bool rq_has_pinned_tasks(struct rq *rq) ++{ ++ return false; ++} ++ ++#endif /* !CONFIG_SMP */ ++ ++static void ++ttwu_stat(struct task_struct *p, int cpu, int wake_flags) ++{ ++ struct rq *rq; ++ ++ if (!schedstat_enabled()) ++ return; ++ ++ rq = this_rq(); ++ ++#ifdef CONFIG_SMP ++ if (cpu == rq->cpu) ++ __schedstat_inc(rq->ttwu_local); ++ else { ++ /** Alt schedule FW ToDo: ++ * How to do ttwu_wake_remote ++ */ ++ } ++#endif /* CONFIG_SMP */ ++ ++ __schedstat_inc(rq->ttwu_count); ++} ++ ++/* ++ * Mark the task runnable and perform wakeup-preemption. 
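set_cpus_allowed_ptr() exported above is the usual entry point for changing a task's affinity from kernel code. A minimal sketch of pinning a task to one CPU; example_pin_task is an invented name and the task pointer is assumed to be held valid by the caller:

#include <linux/cpumask.h>
#include <linux/sched.h>

static int example_pin_task(struct task_struct *tsk, int cpu)
{
        /*
         * Restrict @tsk to @cpu; if it currently runs elsewhere the helper
         * migrates it (possibly via the stopper thread, as shown above).
         */
        return set_cpus_allowed_ptr(tsk, cpumask_of(cpu));
}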
++ */ ++static inline void ++ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) ++{ ++ check_preempt_curr(rq); ++ p->state = TASK_RUNNING; ++ trace_sched_wakeup(p); ++} ++ ++static inline void ++ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags) ++{ ++ if (p->sched_contributes_to_load) ++ rq->nr_uninterruptible--; ++ ++ if ( ++#ifdef CONFIG_SMP ++ !(wake_flags & WF_MIGRATED) && ++#endif ++ p->in_iowait) { ++ delayacct_blkio_end(p); ++ atomic_dec(&task_rq(p)->nr_iowait); ++ } ++ ++ activate_task(p, rq); ++ ttwu_do_wakeup(rq, p, 0); ++} ++ ++/* ++ * Consider @p being inside a wait loop: ++ * ++ * for (;;) { ++ * set_current_state(TASK_UNINTERRUPTIBLE); ++ * ++ * if (CONDITION) ++ * break; ++ * ++ * schedule(); ++ * } ++ * __set_current_state(TASK_RUNNING); ++ * ++ * between set_current_state() and schedule(). In this case @p is still ++ * runnable, so all that needs doing is change p->state back to TASK_RUNNING in ++ * an atomic manner. ++ * ++ * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq ++ * then schedule() must still happen and p->state can be changed to ++ * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we ++ * need to do a full wakeup with enqueue. ++ * ++ * Returns: %true when the wakeup is done, ++ * %false otherwise. ++ */ ++static int ttwu_runnable(struct task_struct *p, int wake_flags) ++{ ++ struct rq *rq; ++ raw_spinlock_t *lock; ++ int ret = 0; ++ ++ rq = __task_access_lock(p, &lock); ++ if (task_on_rq_queued(p)) { ++ /* check_preempt_curr() may use rq clock */ ++ update_rq_clock(rq); ++ ttwu_do_wakeup(rq, p, wake_flags); ++ ret = 1; ++ } ++ __task_access_unlock(p, lock); ++ ++ return ret; ++} ++ ++#ifdef CONFIG_SMP ++void sched_ttwu_pending(void *arg) ++{ ++ struct llist_node *llist = arg; ++ struct rq *rq = this_rq(); ++ struct task_struct *p, *t; ++ struct rq_flags rf; ++ ++ if (!llist) ++ return; ++ ++ /* ++ * rq::ttwu_pending racy indication of out-standing wakeups. ++ * Races such that false-negatives are possible, since they ++ * are shorter lived that false-positives would be. ++ */ ++ WRITE_ONCE(rq->ttwu_pending, 0); ++ ++ rq_lock_irqsave(rq, &rf); ++ update_rq_clock(rq); ++ ++ llist_for_each_entry_safe(p, t, llist, wake_entry.llist) { ++ if (WARN_ON_ONCE(p->on_cpu)) ++ smp_cond_load_acquire(&p->on_cpu, !VAL); ++ ++ if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq))) ++ set_task_cpu(p, cpu_of(rq)); ++ ++ ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0); ++ } ++ ++ rq_unlock_irqrestore(rq, &rf); ++} ++ ++void send_call_function_single_ipi(int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++ if (!set_nr_if_polling(rq->idle)) ++ arch_send_call_function_single_ipi(cpu); ++ else ++ trace_sched_wake_idle_without_ipi(cpu); ++} ++ ++/* ++ * Queue a task on the target CPUs wake_list and wake the CPU via IPI if ++ * necessary. The wakee CPU on receipt of the IPI will queue the task ++ * via sched_ttwu_wakeup() for activation so the wakee incurs the cost ++ * of the wakeup instead of the waker. ++ */ ++static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++ p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); ++ ++ WRITE_ONCE(rq->ttwu_pending, 1); ++ __smp_call_single_queue(cpu, &p->wake_entry.llist); ++} ++ ++static inline bool ttwu_queue_cond(int cpu, int wake_flags) ++{ ++ /* ++ * Do not complicate things with the async wake_list while the CPU is ++ * in hotplug state. 
++ */ ++ if (!cpu_active(cpu)) ++ return false; ++ ++ /* ++ * If the CPU does not share cache, then queue the task on the ++ * remote rqs wakelist to avoid accessing remote data. ++ */ ++ if (!cpus_share_cache(smp_processor_id(), cpu)) ++ return true; ++ ++ /* ++ * If the task is descheduling and the only running task on the ++ * CPU then use the wakelist to offload the task activation to ++ * the soon-to-be-idle CPU as the current CPU is likely busy. ++ * nr_running is checked to avoid unnecessary task stacking. ++ */ ++ if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1) ++ return true; ++ ++ return false; ++} ++ ++static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) ++{ ++ if (__is_defined(ALT_SCHED_TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) { ++ if (WARN_ON_ONCE(cpu == smp_processor_id())) ++ return false; ++ ++ sched_clock_cpu(cpu); /* Sync clocks across CPUs */ ++ __ttwu_queue_wakelist(p, cpu, wake_flags); ++ return true; ++ } ++ ++ return false; ++} ++ ++void wake_up_if_idle(int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ rcu_read_lock(); ++ ++ if (!is_idle_task(rcu_dereference(rq->curr))) ++ goto out; ++ ++ if (set_nr_if_polling(rq->idle)) { ++ trace_sched_wake_idle_without_ipi(cpu); ++ } else { ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ if (is_idle_task(rq->curr)) ++ smp_send_reschedule(cpu); ++ /* Else CPU is not idle, do nothing here */ ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ } ++ ++out: ++ rcu_read_unlock(); ++} ++ ++bool cpus_share_cache(int this_cpu, int that_cpu) ++{ ++ return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); ++} ++#else /* !CONFIG_SMP */ ++ ++static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) ++{ ++ return false; ++} ++ ++#endif /* CONFIG_SMP */ ++ ++static inline void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++ if (ttwu_queue_wakelist(p, cpu, wake_flags)) ++ return; ++ ++ raw_spin_lock(&rq->lock); ++ update_rq_clock(rq); ++ ttwu_do_activate(rq, p, wake_flags); ++ raw_spin_unlock(&rq->lock); ++} ++ ++/* ++ * Notes on Program-Order guarantees on SMP systems. ++ * ++ * MIGRATION ++ * ++ * The basic program-order guarantee on SMP systems is that when a task [t] ++ * migrates, all its activity on its old CPU [c0] happens-before any subsequent ++ * execution on its new CPU [c1]. ++ * ++ * For migration (of runnable tasks) this is provided by the following means: ++ * ++ * A) UNLOCK of the rq(c0)->lock scheduling out task t ++ * B) migration for t is required to synchronize *both* rq(c0)->lock and ++ * rq(c1)->lock (if not at the same time, then in that order). ++ * C) LOCK of the rq(c1)->lock scheduling in task ++ * ++ * Transitivity guarantees that B happens after A and C after B. ++ * Note: we only require RCpc transitivity. ++ * Note: the CPU doing B need not be c0 or c1 ++ * ++ * Example: ++ * ++ * CPU0 CPU1 CPU2 ++ * ++ * LOCK rq(0)->lock ++ * sched-out X ++ * sched-in Y ++ * UNLOCK rq(0)->lock ++ * ++ * LOCK rq(0)->lock // orders against CPU0 ++ * dequeue X ++ * UNLOCK rq(0)->lock ++ * ++ * LOCK rq(1)->lock ++ * enqueue X ++ * UNLOCK rq(1)->lock ++ * ++ * LOCK rq(1)->lock // orders against CPU2 ++ * sched-out Z ++ * sched-in X ++ * UNLOCK rq(1)->lock ++ * ++ * ++ * BLOCKING -- aka. SLEEP + WAKEUP ++ * ++ * For blocking we (obviously) need to provide the same guarantee as for ++ * migration. 
However the means are completely different as there is no lock ++ * chain to provide order. Instead we do: ++ * ++ * 1) smp_store_release(X->on_cpu, 0) -- finish_task() ++ * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up() ++ * ++ * Example: ++ * ++ * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) ++ * ++ * LOCK rq(0)->lock LOCK X->pi_lock ++ * dequeue X ++ * sched-out X ++ * smp_store_release(X->on_cpu, 0); ++ * ++ * smp_cond_load_acquire(&X->on_cpu, !VAL); ++ * X->state = WAKING ++ * set_task_cpu(X,2) ++ * ++ * LOCK rq(2)->lock ++ * enqueue X ++ * X->state = RUNNING ++ * UNLOCK rq(2)->lock ++ * ++ * LOCK rq(2)->lock // orders against CPU1 ++ * sched-out Z ++ * sched-in X ++ * UNLOCK rq(2)->lock ++ * ++ * UNLOCK X->pi_lock ++ * UNLOCK rq(0)->lock ++ * ++ * ++ * However; for wakeups there is a second guarantee we must provide, namely we ++ * must observe the state that lead to our wakeup. That is, not only must our ++ * task observe its own prior state, it must also observe the stores prior to ++ * its wakeup. ++ * ++ * This means that any means of doing remote wakeups must order the CPU doing ++ * the wakeup against the CPU the task is going to end up running on. This, ++ * however, is already required for the regular Program-Order guarantee above, ++ * since the waking CPU is the one issueing the ACQUIRE (smp_cond_load_acquire). ++ * ++ */ ++ ++/** ++ * try_to_wake_up - wake up a thread ++ * @p: the thread to be awakened ++ * @state: the mask of task states that can be woken ++ * @wake_flags: wake modifier flags (WF_*) ++ * ++ * Conceptually does: ++ * ++ * If (@state & @p->state) @p->state = TASK_RUNNING. ++ * ++ * If the task was not queued/runnable, also place it back on a runqueue. ++ * ++ * This function is atomic against schedule() which would dequeue the task. ++ * ++ * It issues a full memory barrier before accessing @p->state, see the comment ++ * with set_current_state(). ++ * ++ * Uses p->pi_lock to serialize against concurrent wake-ups. ++ * ++ * Relies on p->pi_lock stabilizing: ++ * - p->sched_class ++ * - p->cpus_ptr ++ * - p->sched_task_group ++ * in order to do migration, see its use of select_task_rq()/set_task_cpu(). ++ * ++ * Tries really hard to only take one task_rq(p)->lock for performance. ++ * Takes rq->lock in: ++ * - ttwu_runnable() -- old rq, unavoidable, see comment there; ++ * - ttwu_queue() -- new rq, for enqueue of the task; ++ * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us. ++ * ++ * As a consequence we race really badly with just about everything. See the ++ * many memory barriers and their comments for details. ++ * ++ * Return: %true if @p->state changes (an actual wakeup was done), ++ * %false otherwise. ++ */ ++static int try_to_wake_up(struct task_struct *p, unsigned int state, ++ int wake_flags) ++{ ++ unsigned long flags; ++ int cpu, success = 0; ++ ++ preempt_disable(); ++ if (p == current) { ++ /* ++ * We're waking current, this means 'p->on_rq' and 'task_cpu(p) ++ * == smp_processor_id()'. Together this means we can special ++ * case the whole 'p->on_rq && ttwu_runnable()' case below ++ * without taking any locks. ++ * ++ * In particular: ++ * - we rely on Program-Order guarantees for all the ordering, ++ * - we're serialized against set_special_state() by virtue of ++ * it disabling IRQs (this allows not taking ->pi_lock). 
++ */ ++ if (!(p->state & state)) ++ goto out; ++ ++ success = 1; ++ trace_sched_waking(p); ++ p->state = TASK_RUNNING; ++ trace_sched_wakeup(p); ++ goto out; ++ } ++ ++ /* ++ * If we are going to wake up a thread waiting for CONDITION we ++ * need to ensure that CONDITION=1 done by the caller can not be ++ * reordered with p->state check below. This pairs with smp_store_mb() ++ * in set_current_state() that the waiting thread does. ++ */ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ smp_mb__after_spinlock(); ++ if (!(p->state & state)) ++ goto unlock; ++ ++ trace_sched_waking(p); ++ ++ /* We're going to change ->state: */ ++ success = 1; ++ ++ /* ++ * Ensure we load p->on_rq _after_ p->state, otherwise it would ++ * be possible to, falsely, observe p->on_rq == 0 and get stuck ++ * in smp_cond_load_acquire() below. ++ * ++ * sched_ttwu_pending() try_to_wake_up() ++ * STORE p->on_rq = 1 LOAD p->state ++ * UNLOCK rq->lock ++ * ++ * __schedule() (switch to task 'p') ++ * LOCK rq->lock smp_rmb(); ++ * smp_mb__after_spinlock(); ++ * UNLOCK rq->lock ++ * ++ * [task p] ++ * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq ++ * ++ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in ++ * __schedule(). See the comment for smp_mb__after_spinlock(). ++ * ++ * A similar smb_rmb() lives in try_invoke_on_locked_down_task(). ++ */ ++ smp_rmb(); ++ if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags)) ++ goto unlock; ++ ++#ifdef CONFIG_SMP ++ /* ++ * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be ++ * possible to, falsely, observe p->on_cpu == 0. ++ * ++ * One must be running (->on_cpu == 1) in order to remove oneself ++ * from the runqueue. ++ * ++ * __schedule() (switch to task 'p') try_to_wake_up() ++ * STORE p->on_cpu = 1 LOAD p->on_rq ++ * UNLOCK rq->lock ++ * ++ * __schedule() (put 'p' to sleep) ++ * LOCK rq->lock smp_rmb(); ++ * smp_mb__after_spinlock(); ++ * STORE p->on_rq = 0 LOAD p->on_cpu ++ * ++ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in ++ * __schedule(). See the comment for smp_mb__after_spinlock(). ++ * ++ * Form a control-dep-acquire with p->on_rq == 0 above, to ensure ++ * schedule()'s deactivate_task() has 'happened' and p will no longer ++ * care about it's own p->state. See the comment in __schedule(). ++ */ ++ smp_acquire__after_ctrl_dep(); ++ ++ /* ++ * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq ++ * == 0), which means we need to do an enqueue, change p->state to ++ * TASK_WAKING such that we can unlock p->pi_lock before doing the ++ * enqueue, such as ttwu_queue_wakelist(). ++ */ ++ p->state = TASK_WAKING; ++ ++ /* ++ * If the owning (remote) CPU is still in the middle of schedule() with ++ * this task as prev, considering queueing p on the remote CPUs wake_list ++ * which potentially sends an IPI instead of spinning on p->on_cpu to ++ * let the waker make forward progress. This is safe because IRQs are ++ * disabled and the IPI will deliver after on_cpu is cleared. ++ * ++ * Ensure we load task_cpu(p) after p->on_cpu: ++ * ++ * set_task_cpu(p, cpu); ++ * STORE p->cpu = @cpu ++ * __schedule() (switch to task 'p') ++ * LOCK rq->lock ++ * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu) ++ * STORE p->on_cpu = 1 LOAD p->cpu ++ * ++ * to ensure we observe the correct CPU on which the task is currently ++ * scheduling. 
++ */ ++ if (smp_load_acquire(&p->on_cpu) && ++ ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_CPU)) ++ goto unlock; ++ ++ /* ++ * If the owning (remote) CPU is still in the middle of schedule() with ++ * this task as prev, wait until it's done referencing the task. ++ * ++ * Pairs with the smp_store_release() in finish_task(). ++ * ++ * This ensures that tasks getting woken will be fully ordered against ++ * their previous state and preserve Program Order. ++ */ ++ smp_cond_load_acquire(&p->on_cpu, !VAL); ++ ++ sched_task_ttwu(p); ++ ++ cpu = select_task_rq(p, this_rq()); ++ ++ if (cpu != task_cpu(p)) { ++ if (p->in_iowait) { ++ delayacct_blkio_end(p); ++ atomic_dec(&task_rq(p)->nr_iowait); ++ } ++ ++ wake_flags |= WF_MIGRATED; ++ psi_ttwu_dequeue(p); ++ set_task_cpu(p, cpu); ++ } ++#else ++ cpu = task_cpu(p); ++#endif /* CONFIG_SMP */ ++ ++ ttwu_queue(p, cpu, wake_flags); ++unlock: ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++out: ++ if (success) ++ ttwu_stat(p, task_cpu(p), wake_flags); ++ preempt_enable(); ++ ++ return success; ++} ++ ++/** ++ * try_invoke_on_locked_down_task - Invoke a function on task in fixed state ++ * @p: Process for which the function is to be invoked, can be @current. ++ * @func: Function to invoke. ++ * @arg: Argument to function. ++ * ++ * If the specified task can be quickly locked into a definite state ++ * (either sleeping or on a given runqueue), arrange to keep it in that ++ * state while invoking @func(@arg). This function can use ->on_rq and ++ * task_curr() to work out what the state is, if required. Given that ++ * @func can be invoked with a runqueue lock held, it had better be quite ++ * lightweight. ++ * ++ * Returns: ++ * @false if the task slipped out from under the locks. ++ * @true if the task was locked onto a runqueue or is sleeping. ++ * However, @func can override this by returning @false. ++ */ ++bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg) ++{ ++ struct rq_flags rf; ++ bool ret = false; ++ struct rq *rq; ++ ++ raw_spin_lock_irqsave(&p->pi_lock, rf.flags); ++ if (p->on_rq) { ++ rq = __task_rq_lock(p, &rf); ++ if (task_rq(p) == rq) ++ ret = func(p, arg); ++ __task_rq_unlock(rq, &rf); ++ } else { ++ switch (p->state) { ++ case TASK_RUNNING: ++ case TASK_WAKING: ++ break; ++ default: ++ smp_rmb(); // See smp_rmb() comment in try_to_wake_up(). ++ if (!p->on_rq) ++ ret = func(p, arg); ++ } ++ } ++ raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); ++ return ret; ++} ++ ++/** ++ * wake_up_process - Wake up a specific process ++ * @p: The process to be woken up. ++ * ++ * Attempt to wake up the nominated process and move it to the set of runnable ++ * processes. ++ * ++ * Return: 1 if the process was woken up, 0 if it was already running. ++ * ++ * This function executes a full memory barrier before accessing the task state. ++ */ ++int wake_up_process(struct task_struct *p) ++{ ++ return try_to_wake_up(p, TASK_NORMAL, 0); ++} ++EXPORT_SYMBOL(wake_up_process); ++ ++int wake_up_state(struct task_struct *p, unsigned int state) ++{ ++ return try_to_wake_up(p, state, 0); ++} ++ ++/* ++ * Perform scheduler related setup for a newly forked process p. ++ * p is forked by current. 
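try_invoke_on_locked_down_task() above runs a callback while the task is held either on its runqueue or in its sleeping state, so the callback can sample task state without racing against enqueue/dequeue. A hedged usage sketch; both example_* function names are invented for illustration:

static bool example_sample_switches(struct task_struct *t, void *arg)
{
        /* Runs with @t locked down: it cannot be queued or dequeued under us. */
        *(unsigned long *)arg = t->nvcsw + t->nivcsw;
        return true;
}

static bool example_probe_task(struct task_struct *p, unsigned long *switches)
{
        return try_invoke_on_locked_down_task(p, example_sample_switches, switches);
}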
++ * ++ * __sched_fork() is basic setup used by init_idle() too: ++ */ ++static inline void __sched_fork(unsigned long clone_flags, struct task_struct *p) ++{ ++ p->on_rq = 0; ++ p->on_cpu = 0; ++ p->utime = 0; ++ p->stime = 0; ++ p->sched_time = 0; ++ ++#ifdef CONFIG_PREEMPT_NOTIFIERS ++ INIT_HLIST_HEAD(&p->preempt_notifiers); ++#endif ++ ++#ifdef CONFIG_COMPACTION ++ p->capture_control = NULL; ++#endif ++#ifdef CONFIG_SMP ++ p->wake_entry.u_flags = CSD_TYPE_TTWU; ++#endif ++} ++ ++/* ++ * fork()/clone()-time setup: ++ */ ++int sched_fork(unsigned long clone_flags, struct task_struct *p) ++{ ++ unsigned long flags; ++ struct rq *rq; ++ ++ __sched_fork(clone_flags, p); ++ /* ++ * We mark the process as NEW here. This guarantees that ++ * nobody will actually run it, and a signal or other external ++ * event cannot wake it up and insert it on the runqueue either. ++ */ ++ p->state = TASK_NEW; ++ ++ /* ++ * Make sure we do not leak PI boosting priority to the child. ++ */ ++ p->prio = current->normal_prio; ++ ++ /* ++ * Revert to default priority/policy on fork if requested. ++ */ ++ if (unlikely(p->sched_reset_on_fork)) { ++ if (task_has_rt_policy(p)) { ++ p->policy = SCHED_NORMAL; ++ p->static_prio = NICE_TO_PRIO(0); ++ p->rt_priority = 0; ++ } else if (PRIO_TO_NICE(p->static_prio) < 0) ++ p->static_prio = NICE_TO_PRIO(0); ++ ++ p->prio = p->normal_prio = normal_prio(p); ++ ++ /* ++ * We don't need the reset flag anymore after the fork. It has ++ * fulfilled its duty: ++ */ ++ p->sched_reset_on_fork = 0; ++ } ++ ++ /* ++ * The child is not yet in the pid-hash so no cgroup attach races, ++ * and the cgroup is pinned to this child due to cgroup_fork() ++ * is ran before sched_fork(). ++ * ++ * Silence PROVE_RCU. ++ */ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ /* ++ * Share the timeslice between parent and child, thus the ++ * total amount of pending timeslices in the system doesn't change, ++ * resulting in more scheduling fairness. ++ */ ++ rq = this_rq(); ++ raw_spin_lock(&rq->lock); ++ ++ rq->curr->time_slice /= 2; ++ p->time_slice = rq->curr->time_slice; ++#ifdef CONFIG_SCHED_HRTICK ++ hrtick_start(rq, rq->curr->time_slice); ++#endif ++ ++ if (p->time_slice < RESCHED_NS) { ++ p->time_slice = sched_timeslice_ns; ++ resched_curr(rq); ++ } ++ sched_task_fork(p, rq); ++ raw_spin_unlock(&rq->lock); ++ ++ rseq_migrate(p); ++ /* ++ * We're setting the CPU for the first time, we don't migrate, ++ * so use __set_task_cpu(). ++ */ ++ __set_task_cpu(p, cpu_of(rq)); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ ++#ifdef CONFIG_SCHED_INFO ++ if (unlikely(sched_info_on())) ++ memset(&p->sched_info, 0, sizeof(p->sched_info)); ++#endif ++ init_task_preempt_count(p); ++ ++ return 0; ++} ++ ++void sched_post_fork(struct task_struct *p) {} ++ ++#ifdef CONFIG_SCHEDSTATS ++ ++DEFINE_STATIC_KEY_FALSE(sched_schedstats); ++static bool __initdata __sched_schedstats = false; ++ ++static void set_schedstats(bool enabled) ++{ ++ if (enabled) ++ static_branch_enable(&sched_schedstats); ++ else ++ static_branch_disable(&sched_schedstats); ++} ++ ++void force_schedstat_enabled(void) ++{ ++ if (!schedstat_enabled()) { ++ pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); ++ static_branch_enable(&sched_schedstats); ++ } ++} ++ ++static int __init setup_schedstats(char *str) ++{ ++ int ret = 0; ++ if (!str) ++ goto out; ++ ++ /* ++ * This code is called before jump labels have been set up, so we can't ++ * change the static branch directly just yet. 
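The fork path above splits the parent's remaining time slice with the child so that forking does not create extra runtime. A short worked example with assumed numbers (only the symbols come from the patch; the concrete values are illustrative):

/*
 * Assume the parent has 4,000,000 ns of its slice left at fork time:
 *
 *   rq->curr->time_slice /= 2;             -> parent keeps 2,000,000 ns
 *   p->time_slice = rq->curr->time_slice;  -> child gets  2,000,000 ns
 *
 * The total pending slice in the system is unchanged.  If the halved value
 * fell below RESCHED_NS, the child would instead receive a full
 * sched_timeslice_ns and resched_curr() would mark the parent for
 * rescheduling.
 */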
Instead set a temporary ++ * variable so init_schedstats() can do it later. ++ */ ++ if (!strcmp(str, "enable")) { ++ __sched_schedstats = true; ++ ret = 1; ++ } else if (!strcmp(str, "disable")) { ++ __sched_schedstats = false; ++ ret = 1; ++ } ++out: ++ if (!ret) ++ pr_warn("Unable to parse schedstats=\n"); ++ ++ return ret; ++} ++__setup("schedstats=", setup_schedstats); ++ ++static void __init init_schedstats(void) ++{ ++ set_schedstats(__sched_schedstats); ++} ++ ++#ifdef CONFIG_PROC_SYSCTL ++int sysctl_schedstats(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, loff_t *ppos) ++{ ++ struct ctl_table t; ++ int err; ++ int state = static_branch_likely(&sched_schedstats); ++ ++ if (write && !capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ t = *table; ++ t.data = &state; ++ err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); ++ if (err < 0) ++ return err; ++ if (write) ++ set_schedstats(state); ++ return err; ++} ++#endif /* CONFIG_PROC_SYSCTL */ ++#else /* !CONFIG_SCHEDSTATS */ ++static inline void init_schedstats(void) {} ++#endif /* CONFIG_SCHEDSTATS */ ++ ++/* ++ * wake_up_new_task - wake up a newly created task for the first time. ++ * ++ * This function will do some initial scheduler statistics housekeeping ++ * that must be done for every newly created context, then puts the task ++ * on the runqueue and wakes it. ++ */ ++void wake_up_new_task(struct task_struct *p) ++{ ++ unsigned long flags; ++ struct rq *rq; ++ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ ++ p->state = TASK_RUNNING; ++ ++ rq = cpu_rq(select_task_rq(p, this_rq())); ++#ifdef CONFIG_SMP ++ rseq_migrate(p); ++ /* ++ * Fork balancing, do it here and not earlier because: ++ * - cpus_ptr can change in the fork path ++ * - any previously selected CPU might disappear through hotplug ++ * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, ++ * as we're not fully set-up yet. ++ */ ++ __set_task_cpu(p, cpu_of(rq)); ++#endif ++ ++ raw_spin_lock(&rq->lock); ++ ++ update_rq_clock(rq); ++ activate_task(p, rq); ++ trace_sched_wakeup_new(p); ++ check_preempt_curr(rq); ++ ++ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++} ++ ++#ifdef CONFIG_PREEMPT_NOTIFIERS ++ ++static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key); ++ ++void preempt_notifier_inc(void) ++{ ++ static_branch_inc(&preempt_notifier_key); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_inc); ++ ++void preempt_notifier_dec(void) ++{ ++ static_branch_dec(&preempt_notifier_key); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_dec); ++ ++/** ++ * preempt_notifier_register - tell me when current is being preempted & rescheduled ++ * @notifier: notifier struct to register ++ */ ++void preempt_notifier_register(struct preempt_notifier *notifier) ++{ ++ if (!static_branch_unlikely(&preempt_notifier_key)) ++ WARN(1, "registering preempt_notifier while notifiers disabled\n"); ++ ++ hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_register); ++ ++/** ++ * preempt_notifier_unregister - no longer interested in preemption notifications ++ * @notifier: notifier struct to unregister ++ * ++ * This is *not* safe to call from within a preemption notifier. 
++ */ ++void preempt_notifier_unregister(struct preempt_notifier *notifier) ++{ ++ hlist_del(¬ifier->link); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_unregister); ++ ++static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++ struct preempt_notifier *notifier; ++ ++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) ++ notifier->ops->sched_in(notifier, raw_smp_processor_id()); ++} ++ ++static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++ if (static_branch_unlikely(&preempt_notifier_key)) ++ __fire_sched_in_preempt_notifiers(curr); ++} ++ ++static void ++__fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++ struct preempt_notifier *notifier; ++ ++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) ++ notifier->ops->sched_out(notifier, next); ++} ++ ++static __always_inline void ++fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++ if (static_branch_unlikely(&preempt_notifier_key)) ++ __fire_sched_out_preempt_notifiers(curr, next); ++} ++ ++#else /* !CONFIG_PREEMPT_NOTIFIERS */ ++ ++static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++} ++ ++static inline void ++fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++} ++ ++#endif /* CONFIG_PREEMPT_NOTIFIERS */ ++ ++static inline void prepare_task(struct task_struct *next) ++{ ++ /* ++ * Claim the task as running, we do this before switching to it ++ * such that any running task will have this set. ++ * ++ * See the ttwu() WF_ON_CPU case and its ordering comment. ++ */ ++ WRITE_ONCE(next->on_cpu, 1); ++} ++ ++static inline void finish_task(struct task_struct *prev) ++{ ++#ifdef CONFIG_SMP ++ /* ++ * This must be the very last reference to @prev from this CPU. After ++ * p->on_cpu is cleared, the task can be moved to a different CPU. We ++ * must ensure this doesn't happen until the switch is completely ++ * finished. ++ * ++ * In particular, the load of prev->state in finish_task_switch() must ++ * happen before this. ++ * ++ * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). 
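The preempt-notifier hooks above fire around every context switch of the registering task; KVM uses them to save and restore vCPU state. A hedged registration sketch, with all example_* names invented for illustration:

#include <linux/preempt.h>

static void example_sched_in(struct preempt_notifier *pn, int cpu)
{
        /* current was just scheduled back in on @cpu */
}

static void example_sched_out(struct preempt_notifier *pn, struct task_struct *next)
{
        /* current is being switched out in favour of @next */
}

static struct preempt_ops example_preempt_ops = {
        .sched_in       = example_sched_in,
        .sched_out      = example_sched_out,
};

static struct preempt_notifier example_notifier;

static void example_hook_current(void)
{
        preempt_notifier_inc();                         /* enable the static key */
        preempt_notifier_init(&example_notifier, &example_preempt_ops);
        preempt_notifier_register(&example_notifier);   /* attaches to current   */
}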
++ */ ++ smp_store_release(&prev->on_cpu, 0); ++#else ++ prev->on_cpu = 0; ++#endif ++} ++ ++#ifdef CONFIG_SMP ++ ++static void do_balance_callbacks(struct rq *rq, struct callback_head *head) ++{ ++ void (*func)(struct rq *rq); ++ struct callback_head *next; ++ ++ lockdep_assert_held(&rq->lock); ++ ++ while (head) { ++ func = (void (*)(struct rq *))head->func; ++ next = head->next; ++ head->next = NULL; ++ head = next; ++ ++ func(rq); ++ } ++} ++ ++static void balance_push(struct rq *rq); ++ ++struct callback_head balance_push_callback = { ++ .next = NULL, ++ .func = (void (*)(struct callback_head *))balance_push, ++}; ++ ++static inline struct callback_head *splice_balance_callbacks(struct rq *rq) ++{ ++ struct callback_head *head = rq->balance_callback; ++ ++ if (head) { ++ lockdep_assert_held(&rq->lock); ++ rq->balance_callback = NULL; ++ } ++ ++ return head; ++} ++ ++static void __balance_callbacks(struct rq *rq) ++{ ++ do_balance_callbacks(rq, splice_balance_callbacks(rq)); ++} ++ ++static inline void balance_callbacks(struct rq *rq, struct callback_head *head) ++{ ++ unsigned long flags; ++ ++ if (unlikely(head)) { ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ do_balance_callbacks(rq, head); ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ } ++} ++ ++#else ++ ++static inline void __balance_callbacks(struct rq *rq) ++{ ++} ++ ++static inline struct callback_head *splice_balance_callbacks(struct rq *rq) ++{ ++ return NULL; ++} ++ ++static inline void balance_callbacks(struct rq *rq, struct callback_head *head) ++{ ++} ++ ++#endif ++ ++static inline void ++prepare_lock_switch(struct rq *rq, struct task_struct *next) ++{ ++ /* ++ * Since the runqueue lock will be released by the next ++ * task (which is an invalid locking op but in the case ++ * of the scheduler it's an obvious special-case), so we ++ * do an early lockdep release here: ++ */ ++ spin_release(&rq->lock.dep_map, _THIS_IP_); ++#ifdef CONFIG_DEBUG_SPINLOCK ++ /* this is a valid case when another task releases the spinlock */ ++ rq->lock.owner = next; ++#endif ++} ++ ++static inline void finish_lock_switch(struct rq *rq) ++{ ++ /* ++ * If we are tracking spinlock dependencies then we have to ++ * fix up the runqueue lock - which gets 'carried over' from ++ * prev into current: ++ */ ++ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); ++ __balance_callbacks(rq); ++ raw_spin_unlock_irq(&rq->lock); ++} ++ ++/* ++ * NOP if the arch has not defined these: ++ */ ++ ++#ifndef prepare_arch_switch ++# define prepare_arch_switch(next) do { } while (0) ++#endif ++ ++#ifndef finish_arch_post_lock_switch ++# define finish_arch_post_lock_switch() do { } while (0) ++#endif ++ ++static inline void kmap_local_sched_out(void) ++{ ++#ifdef CONFIG_KMAP_LOCAL ++ if (unlikely(current->kmap_ctrl.idx)) ++ __kmap_local_sched_out(); ++#endif ++} ++ ++static inline void kmap_local_sched_in(void) ++{ ++#ifdef CONFIG_KMAP_LOCAL ++ if (unlikely(current->kmap_ctrl.idx)) ++ __kmap_local_sched_in(); ++#endif ++} ++ ++/** ++ * prepare_task_switch - prepare to switch tasks ++ * @rq: the runqueue preparing to switch ++ * @next: the task we are going to switch to. ++ * ++ * This is called with the rq lock held and interrupts off. It must ++ * be paired with a subsequent finish_task_switch after the context ++ * switch. ++ * ++ * prepare_task_switch sets up locking and calls architecture specific ++ * hooks. 
++ */ ++static inline void ++prepare_task_switch(struct rq *rq, struct task_struct *prev, ++ struct task_struct *next) ++{ ++ kcov_prepare_switch(prev); ++ sched_info_switch(rq, prev, next); ++ perf_event_task_sched_out(prev, next); ++ rseq_preempt(prev); ++ fire_sched_out_preempt_notifiers(prev, next); ++ kmap_local_sched_out(); ++ prepare_task(next); ++ prepare_arch_switch(next); ++} ++ ++/** ++ * finish_task_switch - clean up after a task-switch ++ * @rq: runqueue associated with task-switch ++ * @prev: the thread we just switched away from. ++ * ++ * finish_task_switch must be called after the context switch, paired ++ * with a prepare_task_switch call before the context switch. ++ * finish_task_switch will reconcile locking set up by prepare_task_switch, ++ * and do any other architecture-specific cleanup actions. ++ * ++ * Note that we may have delayed dropping an mm in context_switch(). If ++ * so, we finish that here outside of the runqueue lock. (Doing it ++ * with the lock held can cause deadlocks; see schedule() for ++ * details.) ++ * ++ * The context switch have flipped the stack from under us and restored the ++ * local variables which were saved when this task called schedule() in the ++ * past. prev == current is still correct but we need to recalculate this_rq ++ * because prev may have moved to another CPU. ++ */ ++static struct rq *finish_task_switch(struct task_struct *prev) ++ __releases(rq->lock) ++{ ++ struct rq *rq = this_rq(); ++ struct mm_struct *mm = rq->prev_mm; ++ long prev_state; ++ ++ /* ++ * The previous task will have left us with a preempt_count of 2 ++ * because it left us after: ++ * ++ * schedule() ++ * preempt_disable(); // 1 ++ * __schedule() ++ * raw_spin_lock_irq(&rq->lock) // 2 ++ * ++ * Also, see FORK_PREEMPT_COUNT. ++ */ ++ if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET, ++ "corrupted preempt_count: %s/%d/0x%x\n", ++ current->comm, current->pid, preempt_count())) ++ preempt_count_set(FORK_PREEMPT_COUNT); ++ ++ rq->prev_mm = NULL; ++ ++ /* ++ * A task struct has one reference for the use as "current". ++ * If a task dies, then it sets TASK_DEAD in tsk->state and calls ++ * schedule one last time. The schedule call will never return, and ++ * the scheduled task must drop that reference. ++ * ++ * We must observe prev->state before clearing prev->on_cpu (in ++ * finish_task), otherwise a concurrent wakeup can get prev ++ * running on another CPU and we could rave with its RUNNING -> DEAD ++ * transition, resulting in a double drop. ++ */ ++ prev_state = prev->state; ++ vtime_task_switch(prev); ++ perf_event_task_sched_in(prev, current); ++ finish_task(prev); ++ finish_lock_switch(rq); ++ finish_arch_post_lock_switch(); ++ kcov_finish_switch(current); ++ /* ++ * kmap_local_sched_out() is invoked with rq::lock held and ++ * interrupts disabled. There is no requirement for that, but the ++ * sched out code does not have an interrupt enabled section. ++ * Restoring the maps on sched in does not require interrupts being ++ * disabled either. ++ */ ++ kmap_local_sched_in(); ++ ++ fire_sched_in_preempt_notifiers(current); ++ /* ++ * When switching through a kernel thread, the loop in ++ * membarrier_{private,global}_expedited() may have observed that ++ * kernel thread and not issued an IPI. It is therefore possible to ++ * schedule between user->kernel->user threads without passing though ++ * switch_mm(). 
Membarrier requires a barrier after storing to ++ * rq->curr, before returning to userspace, so provide them here: ++ * ++ * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly ++ * provided by mmdrop(), ++ * - a sync_core for SYNC_CORE. ++ */ ++ if (mm) { ++ membarrier_mm_sync_core_before_usermode(mm); ++ mmdrop(mm); ++ } ++ if (unlikely(prev_state == TASK_DEAD)) { ++ /* ++ * Remove function-return probe instances associated with this ++ * task and put them back on the free list. ++ */ ++ kprobe_flush_task(prev); ++ ++ /* Task is done with its stack. */ ++ put_task_stack(prev); ++ ++ put_task_struct_rcu_user(prev); ++ } ++ ++ tick_nohz_task_switch(); ++ return rq; ++} ++ ++/** ++ * schedule_tail - first thing a freshly forked thread must call. ++ * @prev: the thread we just switched away from. ++ */ ++asmlinkage __visible void schedule_tail(struct task_struct *prev) ++ __releases(rq->lock) ++{ ++ struct rq *rq; ++ ++ /* ++ * New tasks start with FORK_PREEMPT_COUNT, see there and ++ * finish_task_switch() for details. ++ * ++ * finish_task_switch() will drop rq->lock() and lower preempt_count ++ * and the preempt_enable() will end up enabling preemption (on ++ * PREEMPT_COUNT kernels). ++ */ ++ ++ rq = finish_task_switch(prev); ++ preempt_enable(); ++ ++ if (current->set_child_tid) ++ put_user(task_pid_vnr(current), current->set_child_tid); ++ ++ calculate_sigpending(); ++} ++ ++/* ++ * context_switch - switch to the new MM and the new thread's register state. ++ */ ++static __always_inline struct rq * ++context_switch(struct rq *rq, struct task_struct *prev, ++ struct task_struct *next) ++{ ++ prepare_task_switch(rq, prev, next); ++ ++ /* ++ * For paravirt, this is coupled with an exit in switch_to to ++ * combine the page table reload and the switch backend into ++ * one hypercall. ++ */ ++ arch_start_context_switch(prev); ++ ++ /* ++ * kernel -> kernel lazy + transfer active ++ * user -> kernel lazy + mmgrab() active ++ * ++ * kernel -> user switch + mmdrop() active ++ * user -> user switch ++ */ ++ if (!next->mm) { // to kernel ++ enter_lazy_tlb(prev->active_mm, next); ++ ++ next->active_mm = prev->active_mm; ++ if (prev->mm) // from user ++ mmgrab(prev->active_mm); ++ else ++ prev->active_mm = NULL; ++ } else { // to user ++ membarrier_switch_mm(rq, prev->active_mm, next->mm); ++ /* ++ * sys_membarrier() requires an smp_mb() between setting ++ * rq->curr / membarrier_switch_mm() and returning to userspace. ++ * ++ * The below provides this either through switch_mm(), or in ++ * case 'prev->active_mm == next->mm' through ++ * finish_task_switch()'s mmdrop(). ++ */ ++ switch_mm_irqs_off(prev->active_mm, next->mm, next); ++ ++ if (!prev->mm) { // from kernel ++ /* will mmdrop() in finish_task_switch(). */ ++ rq->prev_mm = prev->active_mm; ++ prev->active_mm = NULL; ++ } ++ } ++ ++ prepare_lock_switch(rq, next); ++ ++ /* Here we just switch the register state and the stack. */ ++ switch_to(prev, next, prev); ++ barrier(); ++ ++ return finish_task_switch(prev); ++} ++ ++/* ++ * nr_running, nr_uninterruptible and nr_context_switches: ++ * ++ * externally visible scheduler statistics: current number of runnable ++ * threads, total number of context switches performed since bootup. ++ */ ++unsigned long nr_running(void) ++{ ++ unsigned long i, sum = 0; ++ ++ for_each_online_cpu(i) ++ sum += cpu_rq(i)->nr_running; ++ ++ return sum; ++} ++ ++/* ++ * Check if only the current task is running on the CPU. 
++ * ++ * Caution: this function does not check that the caller has disabled ++ * preemption, thus the result might have a time-of-check-to-time-of-use ++ * race. The caller is responsible to use it correctly, for example: ++ * ++ * - from a non-preemptible section (of course) ++ * ++ * - from a thread that is bound to a single CPU ++ * ++ * - in a loop with very short iterations (e.g. a polling loop) ++ */ ++bool single_task_running(void) ++{ ++ return raw_rq()->nr_running == 1; ++} ++EXPORT_SYMBOL(single_task_running); ++ ++unsigned long long nr_context_switches(void) ++{ ++ int i; ++ unsigned long long sum = 0; ++ ++ for_each_possible_cpu(i) ++ sum += cpu_rq(i)->nr_switches; ++ ++ return sum; ++} ++ ++/* ++ * Consumers of these two interfaces, like for example the cpuidle menu ++ * governor, are using nonsensical data. Preferring shallow idle state selection ++ * for a CPU that has IO-wait which might not even end up running the task when ++ * it does become runnable. ++ */ ++ ++unsigned long nr_iowait_cpu(int cpu) ++{ ++ return atomic_read(&cpu_rq(cpu)->nr_iowait); ++} ++ ++/* ++ * IO-wait accounting, and how it's mostly bollocks (on SMP). ++ * ++ * The idea behind IO-wait account is to account the idle time that we could ++ * have spend running if it were not for IO. That is, if we were to improve the ++ * storage performance, we'd have a proportional reduction in IO-wait time. ++ * ++ * This all works nicely on UP, where, when a task blocks on IO, we account ++ * idle time as IO-wait, because if the storage were faster, it could've been ++ * running and we'd not be idle. ++ * ++ * This has been extended to SMP, by doing the same for each CPU. This however ++ * is broken. ++ * ++ * Imagine for instance the case where two tasks block on one CPU, only the one ++ * CPU will have IO-wait accounted, while the other has regular idle. Even ++ * though, if the storage were faster, both could've ran at the same time, ++ * utilising both CPUs. ++ * ++ * This means, that when looking globally, the current IO-wait accounting on ++ * SMP is a lower bound, by reason of under accounting. ++ * ++ * Worse, since the numbers are provided per CPU, they are sometimes ++ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly ++ * associated with any one particular CPU, it can wake to another CPU than it ++ * blocked on. This means the per CPU IO-wait number is meaningless. ++ * ++ * Task CPU affinities can make all that even more 'interesting'. ++ */ ++ ++unsigned long nr_iowait(void) ++{ ++ unsigned long i, sum = 0; ++ ++ for_each_possible_cpu(i) ++ sum += nr_iowait_cpu(i); ++ ++ return sum; ++} ++ ++#ifdef CONFIG_SMP ++ ++/* ++ * sched_exec - execve() is a valuable balancing opportunity, because at ++ * this point the task has the smallest effective memory and cache ++ * footprint. 
++ */ ++void sched_exec(void) ++{ ++ struct task_struct *p = current; ++ unsigned long flags; ++ int dest_cpu; ++ struct rq *rq; ++ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ rq = this_rq(); ++ ++ if (rq != task_rq(p) || rq->nr_running < 2) ++ goto unlock; ++ ++ dest_cpu = select_task_rq(p, task_rq(p)); ++ if (dest_cpu == smp_processor_id()) ++ goto unlock; ++ ++ if (likely(cpu_active(dest_cpu))) { ++ struct migration_arg arg = { p, dest_cpu }; ++ ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); ++ return; ++ } ++unlock: ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++} ++ ++#endif ++ ++DEFINE_PER_CPU(struct kernel_stat, kstat); ++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); ++ ++EXPORT_PER_CPU_SYMBOL(kstat); ++EXPORT_PER_CPU_SYMBOL(kernel_cpustat); ++ ++static inline void update_curr(struct rq *rq, struct task_struct *p) ++{ ++ s64 ns = rq->clock_task - p->last_ran; ++ ++ p->sched_time += ns; ++ account_group_exec_runtime(p, ns); ++ ++ p->time_slice -= ns; ++ p->last_ran = rq->clock_task; ++} ++ ++/* ++ * Return accounted runtime for the task. ++ * Return separately the current's pending runtime that have not been ++ * accounted yet. ++ */ ++unsigned long long task_sched_runtime(struct task_struct *p) ++{ ++ unsigned long flags; ++ struct rq *rq; ++ raw_spinlock_t *lock; ++ u64 ns; ++ ++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP) ++ /* ++ * 64-bit doesn't need locks to atomically read a 64-bit value. ++ * So we have a optimization chance when the task's delta_exec is 0. ++ * Reading ->on_cpu is racy, but this is ok. ++ * ++ * If we race with it leaving CPU, we'll take a lock. So we're correct. ++ * If we race with it entering CPU, unaccounted time is 0. This is ++ * indistinguishable from the read occurring a few cycles earlier. ++ * If we see ->on_cpu without ->on_rq, the task is leaving, and has ++ * been accounted, so we're correct here as well. ++ */ ++ if (!p->on_cpu || !task_on_rq_queued(p)) ++ return tsk_seruntime(p); ++#endif ++ ++ rq = task_access_lock_irqsave(p, &lock, &flags); ++ /* ++ * Must be ->curr _and_ ->on_rq. If dequeued, we would ++ * project cycles that may never be accounted to this ++ * thread, breaking clock_gettime(). ++ */ ++ if (p == rq->curr && task_on_rq_queued(p)) { ++ update_rq_clock(rq); ++ update_curr(rq, p); ++ } ++ ns = tsk_seruntime(p); ++ task_access_unlock_irqrestore(p, lock, &flags); ++ ++ return ns; ++} ++ ++/* This manages tasks that have run out of timeslice during a scheduler_tick */ ++static inline void scheduler_task_tick(struct rq *rq) ++{ ++ struct task_struct *p = rq->curr; ++ ++ if (is_idle_task(p)) ++ return; ++ ++ update_curr(rq, p); ++ cpufreq_update_util(rq, 0); ++ ++ /* ++ * Tasks have less than RESCHED_NS of time slice left they will be ++ * rescheduled. ++ */ ++ if (p->time_slice >= RESCHED_NS) ++ return; ++ set_tsk_need_resched(p); ++ set_preempt_need_resched(); ++} ++ ++/* ++ * This function gets called by the timer code, with HZ frequency. ++ * We call it with interrupts disabled. 
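update_curr() above charges the time since ->last_ran against both the task's accumulated ->sched_time and its remaining ->time_slice, and scheduler_task_tick() asks for a reschedule once less than RESCHED_NS of slice remains. A self-contained sketch of that bookkeeping against CLOCK_MONOTONIC; the threshold value, field names and initial slice here are illustrative, not the patch's actual constants.

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define RESCHED_NS (100 * 1000)     /* illustrative threshold, not the kernel's value */

struct fake_task {
    int64_t  time_slice;            /* ns left in the current slice */
    uint64_t last_ran;              /* timestamp of the last accounting */
    uint64_t sched_time;            /* total accounted runtime */
};

static uint64_t now_ns(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Charge the time since last_ran to the task, like update_curr() above. */
static void charge_runtime(struct fake_task *p)
{
    uint64_t now = now_ns();
    int64_t ns = (int64_t)(now - p->last_ran);

    p->sched_time += ns;
    p->time_slice -= ns;
    p->last_ran = now;
}

int main(void)
{
    struct fake_task t = { .time_slice = 4000000, .last_ran = now_ns() };

    for (volatile long i = 0; i < 1000000; i++)   /* burn a little CPU */
        ;

    charge_runtime(&t);
    printf("ran %llu ns, %lld ns of slice left, resched=%d\n",
           (unsigned long long)t.sched_time, (long long)t.time_slice,
           t.time_slice < RESCHED_NS);
    return 0;
}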
++ */ ++void scheduler_tick(void) ++{ ++ int cpu __maybe_unused = smp_processor_id(); ++ struct rq *rq = cpu_rq(cpu); ++ ++ arch_scale_freq_tick(); ++ sched_clock_tick(); ++ ++ raw_spin_lock(&rq->lock); ++ update_rq_clock(rq); ++ ++ scheduler_task_tick(rq); ++ calc_global_load_tick(rq); ++ ++ rq->last_tick = rq->clock; ++ raw_spin_unlock(&rq->lock); ++ ++ perf_event_task_tick(); ++} ++ ++#ifdef CONFIG_SCHED_SMT ++static inline int active_load_balance_cpu_stop(void *data) ++{ ++ struct rq *rq = this_rq(); ++ struct task_struct *p = data; ++ cpumask_t tmp; ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ ++ raw_spin_lock(&p->pi_lock); ++ raw_spin_lock(&rq->lock); ++ ++ rq->active_balance = 0; ++ /* _something_ may have changed the task, double check again */ ++ if (task_on_rq_queued(p) && task_rq(p) == rq && ++ cpumask_and(&tmp, p->cpus_ptr, &sched_sg_idle_mask) && ++ !is_migration_disabled(p)) { ++ int cpu = cpu_of(rq); ++ int dcpu = __best_mask_cpu(cpu, &tmp, ++ per_cpu(sched_cpu_llc_mask, cpu)); ++ rq = move_queued_task(rq, p, dcpu); ++ } ++ ++ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock(&p->pi_lock); ++ ++ local_irq_restore(flags); ++ ++ return 0; ++} ++ ++/* sg_balance_trigger - trigger slibing group balance for @cpu */ ++static inline int sg_balance_trigger(const int cpu) ++{ ++ struct rq *rq= cpu_rq(cpu); ++ unsigned long flags; ++ struct task_struct *curr; ++ int res; ++ ++ if (!raw_spin_trylock_irqsave(&rq->lock, flags)) ++ return 0; ++ curr = rq->curr; ++ res = (!is_idle_task(curr)) && (1 == rq->nr_running) &&\ ++ cpumask_intersects(curr->cpus_ptr, &sched_sg_idle_mask) &&\ ++ !is_migration_disabled(curr) && (!rq->active_balance); ++ ++ if (res) ++ rq->active_balance = 1; ++ ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ ++ if (res) ++ stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, ++ curr, &rq->active_balance_work); ++ return res; ++} ++ ++/* ++ * sg_balance_check - slibing group balance check for run queue @rq ++ */ ++static inline void sg_balance_check(struct rq *rq) ++{ ++ cpumask_t chk; ++ int cpu; ++ ++ /* exit when no sg in idle */ ++ if (cpumask_empty(&sched_sg_idle_mask)) ++ return; ++ ++ /* exit when cpu is offline */ ++ if (unlikely(!rq->online)) ++ return; ++ ++ cpu = cpu_of(rq); ++ /* ++ * Only cpu in slibing idle group will do the checking and then ++ * find potential cpus which can migrate the current running task ++ */ ++ if (cpumask_test_cpu(cpu, &sched_sg_idle_mask) && ++ cpumask_andnot(&chk, cpu_online_mask, &sched_rq_pending_mask) && ++ cpumask_andnot(&chk, &chk, &sched_rq_watermark[IDLE_WM])) { ++ int i, tried = 0; ++ ++ for_each_cpu_wrap(i, &chk, cpu) { ++ if (cpumask_subset(cpu_smt_mask(i), &chk)) { ++ if (sg_balance_trigger(i)) ++ return; ++ if (tried) ++ return; ++ tried++; ++ } ++ } ++ } ++} ++#endif /* CONFIG_SCHED_SMT */ ++ ++#ifdef CONFIG_NO_HZ_FULL ++ ++struct tick_work { ++ int cpu; ++ atomic_t state; ++ struct delayed_work work; ++}; ++/* Values for ->state, see diagram below. 
*/ ++#define TICK_SCHED_REMOTE_OFFLINE 0 ++#define TICK_SCHED_REMOTE_OFFLINING 1 ++#define TICK_SCHED_REMOTE_RUNNING 2 ++ ++/* ++ * State diagram for ->state: ++ * ++ * ++ * TICK_SCHED_REMOTE_OFFLINE ++ * | ^ ++ * | | ++ * | | sched_tick_remote() ++ * | | ++ * | | ++ * +--TICK_SCHED_REMOTE_OFFLINING ++ * | ^ ++ * | | ++ * sched_tick_start() | | sched_tick_stop() ++ * | | ++ * V | ++ * TICK_SCHED_REMOTE_RUNNING ++ * ++ * ++ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote() ++ * and sched_tick_start() are happy to leave the state in RUNNING. ++ */ ++ ++static struct tick_work __percpu *tick_work_cpu; ++ ++static void sched_tick_remote(struct work_struct *work) ++{ ++ struct delayed_work *dwork = to_delayed_work(work); ++ struct tick_work *twork = container_of(dwork, struct tick_work, work); ++ int cpu = twork->cpu; ++ struct rq *rq = cpu_rq(cpu); ++ struct task_struct *curr; ++ unsigned long flags; ++ u64 delta; ++ int os; ++ ++ /* ++ * Handle the tick only if it appears the remote CPU is running in full ++ * dynticks mode. The check is racy by nature, but missing a tick or ++ * having one too much is no big deal because the scheduler tick updates ++ * statistics and checks timeslices in a time-independent way, regardless ++ * of when exactly it is running. ++ */ ++ if (!tick_nohz_tick_stopped_cpu(cpu)) ++ goto out_requeue; ++ ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ curr = rq->curr; ++ if (cpu_is_offline(cpu)) ++ goto out_unlock; ++ ++ update_rq_clock(rq); ++ if (!is_idle_task(curr)) { ++ /* ++ * Make sure the next tick runs within a reasonable ++ * amount of time. ++ */ ++ delta = rq_clock_task(rq) - curr->last_ran; ++ WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); ++ } ++ scheduler_task_tick(rq); ++ ++ calc_load_nohz_remote(rq); ++out_unlock: ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ ++out_requeue: ++ /* ++ * Run the remote tick once per second (1Hz). This arbitrary ++ * frequency is large enough to avoid overload but short enough ++ * to keep scheduler internal stats reasonably up to date. But ++ * first update state to reflect hotplug activity if required. 
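The teardown handshake described by the state diagram above hinges on an atomic add-unless: in sched_tick_remote() just below, the worker steps OFFLINING towards OFFLINE with atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING) and re-queues itself only if it observed RUNNING. A rough C11-atomics sketch of that primitive, not the kernel's implementation:

#include <stdatomic.h>
#include <stdio.h>

/* Atomically add @a to *@v unless *@v == @u; returns the old value.
 * Illustrative C11 analogue of the kernel primitive of the same name. */
static int fetch_add_unless(atomic_int *v, int a, int u)
{
    int old = atomic_load(v);

    while (old != u &&
           !atomic_compare_exchange_weak(v, &old, old + a))
        ;   /* 'old' is refreshed on failure; retry */

    return old;
}

enum { REMOTE_OFFLINE, REMOTE_OFFLINING, REMOTE_RUNNING };

int main(void)
{
    atomic_int state = REMOTE_OFFLINING;

    /* Worker path: step OFFLINING -> OFFLINE, but leave RUNNING alone. */
    int os = fetch_add_unless(&state, -1, REMOTE_RUNNING);

    printf("old=%d new=%d requeue=%d\n",
           os, atomic_load(&state), os == REMOTE_RUNNING);
    return 0;
}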
++ */ ++ os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); ++ WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE); ++ if (os == TICK_SCHED_REMOTE_RUNNING) ++ queue_delayed_work(system_unbound_wq, dwork, HZ); ++} ++ ++static void sched_tick_start(int cpu) ++{ ++ int os; ++ struct tick_work *twork; ++ ++ if (housekeeping_cpu(cpu, HK_FLAG_TICK)) ++ return; ++ ++ WARN_ON_ONCE(!tick_work_cpu); ++ ++ twork = per_cpu_ptr(tick_work_cpu, cpu); ++ os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); ++ WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING); ++ if (os == TICK_SCHED_REMOTE_OFFLINE) { ++ twork->cpu = cpu; ++ INIT_DELAYED_WORK(&twork->work, sched_tick_remote); ++ queue_delayed_work(system_unbound_wq, &twork->work, HZ); ++ } ++} ++ ++#ifdef CONFIG_HOTPLUG_CPU ++static void sched_tick_stop(int cpu) ++{ ++ struct tick_work *twork; ++ ++ if (housekeeping_cpu(cpu, HK_FLAG_TICK)) ++ return; ++ ++ WARN_ON_ONCE(!tick_work_cpu); ++ ++ twork = per_cpu_ptr(tick_work_cpu, cpu); ++ cancel_delayed_work_sync(&twork->work); ++} ++#endif /* CONFIG_HOTPLUG_CPU */ ++ ++int __init sched_tick_offload_init(void) ++{ ++ tick_work_cpu = alloc_percpu(struct tick_work); ++ BUG_ON(!tick_work_cpu); ++ return 0; ++} ++ ++#else /* !CONFIG_NO_HZ_FULL */ ++static inline void sched_tick_start(int cpu) { } ++static inline void sched_tick_stop(int cpu) { } ++#endif ++ ++#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \ ++ defined(CONFIG_PREEMPT_TRACER)) ++/* ++ * If the value passed in is equal to the current preempt count ++ * then we just disabled preemption. Start timing the latency. ++ */ ++static inline void preempt_latency_start(int val) ++{ ++ if (preempt_count() == val) { ++ unsigned long ip = get_lock_parent_ip(); ++#ifdef CONFIG_DEBUG_PREEMPT ++ current->preempt_disable_ip = ip; ++#endif ++ trace_preempt_off(CALLER_ADDR0, ip); ++ } ++} ++ ++void preempt_count_add(int val) ++{ ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Underflow? ++ */ ++ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) ++ return; ++#endif ++ __preempt_count_add(val); ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Spinlock count overflowing soon? ++ */ ++ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= ++ PREEMPT_MASK - 10); ++#endif ++ preempt_latency_start(val); ++} ++EXPORT_SYMBOL(preempt_count_add); ++NOKPROBE_SYMBOL(preempt_count_add); ++ ++/* ++ * If the value passed in equals to the current preempt count ++ * then we just enabled preemption. Stop timing the latency. ++ */ ++static inline void preempt_latency_stop(int val) ++{ ++ if (preempt_count() == val) ++ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); ++} ++ ++void preempt_count_sub(int val) ++{ ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Underflow? ++ */ ++ if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) ++ return; ++ /* ++ * Is the spinlock portion underflowing? 
++ */ ++ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && ++ !(preempt_count() & PREEMPT_MASK))) ++ return; ++#endif ++ ++ preempt_latency_stop(val); ++ __preempt_count_sub(val); ++} ++EXPORT_SYMBOL(preempt_count_sub); ++NOKPROBE_SYMBOL(preempt_count_sub); ++ ++#else ++static inline void preempt_latency_start(int val) { } ++static inline void preempt_latency_stop(int val) { } ++#endif ++ ++static inline unsigned long get_preempt_disable_ip(struct task_struct *p) ++{ ++#ifdef CONFIG_DEBUG_PREEMPT ++ return p->preempt_disable_ip; ++#else ++ return 0; ++#endif ++} ++ ++/* ++ * Print scheduling while atomic bug: ++ */ ++static noinline void __schedule_bug(struct task_struct *prev) ++{ ++ /* Save this before calling printk(), since that will clobber it */ ++ unsigned long preempt_disable_ip = get_preempt_disable_ip(current); ++ ++ if (oops_in_progress) ++ return; ++ ++ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", ++ prev->comm, prev->pid, preempt_count()); ++ ++ debug_show_held_locks(prev); ++ print_modules(); ++ if (irqs_disabled()) ++ print_irqtrace_events(prev); ++ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) ++ && in_atomic_preempt_off()) { ++ pr_err("Preemption disabled at:"); ++ print_ip_sym(KERN_ERR, preempt_disable_ip); ++ } ++ if (panic_on_warn) ++ panic("scheduling while atomic\n"); ++ ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++} ++ ++/* ++ * Various schedule()-time debugging checks and statistics: ++ */ ++static inline void schedule_debug(struct task_struct *prev, bool preempt) ++{ ++#ifdef CONFIG_SCHED_STACK_END_CHECK ++ if (task_stack_end_corrupted(prev)) ++ panic("corrupted stack end detected inside scheduler\n"); ++ ++ if (task_scs_end_corrupted(prev)) ++ panic("corrupted shadow stack detected inside scheduler\n"); ++#endif ++ ++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP ++ if (!preempt && prev->state && prev->non_block_count) { ++ printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", ++ prev->comm, prev->pid, prev->non_block_count); ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++ } ++#endif ++ ++ if (unlikely(in_atomic_preempt_off())) { ++ __schedule_bug(prev); ++ preempt_count_set(PREEMPT_DISABLED); ++ } ++ rcu_sleep_check(); ++ SCHED_WARN_ON(ct_state() == CONTEXT_USER); ++ ++ profile_hit(SCHED_PROFILING, __builtin_return_address(0)); ++ ++ schedstat_inc(this_rq()->sched_count); ++} ++ ++/* ++ * Compile time debug macro ++ * #define ALT_SCHED_DEBUG ++ */ ++ ++#ifdef ALT_SCHED_DEBUG ++void alt_sched_debug(void) ++{ ++ printk(KERN_INFO "sched: pending: 0x%04lx, idle: 0x%04lx, sg_idle: 0x%04lx\n", ++ sched_rq_pending_mask.bits[0], ++ sched_rq_watermark[IDLE_WM].bits[0], ++ sched_sg_idle_mask.bits[0]); ++} ++#else ++inline void alt_sched_debug(void) {} ++#endif ++ ++#ifdef CONFIG_SMP ++ ++#define SCHED_RQ_NR_MIGRATION (32U) ++/* ++ * Migrate pending tasks in @rq to @dest_cpu ++ * Will try to migrate mininal of half of @rq nr_running tasks and ++ * SCHED_RQ_NR_MIGRATION to @dest_cpu ++ */ ++static inline int ++migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu) ++{ ++ struct task_struct *p, *skip = rq->curr; ++ int nr_migrated = 0; ++ int nr_tries = min(rq->nr_running / 2, SCHED_RQ_NR_MIGRATION); ++ ++ while (skip != rq->idle && nr_tries && ++ (p = sched_rq_next_task(skip, rq)) != rq->idle) { ++ skip = sched_rq_next_task(p, rq); ++ if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) { ++ __SCHED_DEQUEUE_TASK(p, rq, 0, ); ++ set_task_cpu(p, dest_cpu); ++ __SCHED_ENQUEUE_TASK(p, dest_rq, 0); ++ nr_migrated++; ++ 
} ++ nr_tries--; ++ } ++ ++ return nr_migrated; ++} ++ ++static inline int take_other_rq_tasks(struct rq *rq, int cpu) ++{ ++ struct cpumask *affinity_mask, *end_mask; ++ ++ if (unlikely(!rq->online)) ++ return 0; ++ ++ if (cpumask_empty(&sched_rq_pending_mask)) ++ return 0; ++ ++ affinity_mask = per_cpu(sched_cpu_affinity_masks, cpu) + 1; ++ end_mask = per_cpu(sched_cpu_affinity_end_mask, cpu); ++ do { ++ int i; ++ for_each_cpu_and(i, &sched_rq_pending_mask, affinity_mask) { ++ int nr_migrated; ++ struct rq *src_rq; ++ ++ src_rq = cpu_rq(i); ++ if (!do_raw_spin_trylock(&src_rq->lock)) ++ continue; ++ spin_acquire(&src_rq->lock.dep_map, ++ SINGLE_DEPTH_NESTING, 1, _RET_IP_); ++ ++ if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) { ++ src_rq->nr_running -= nr_migrated; ++ if (src_rq->nr_running < 2) ++ cpumask_clear_cpu(i, &sched_rq_pending_mask); ++ ++ rq->nr_running += nr_migrated; ++ if (rq->nr_running > 1) ++ cpumask_set_cpu(cpu, &sched_rq_pending_mask); ++ ++ update_sched_rq_watermark(rq); ++ cpufreq_update_util(rq, 0); ++ ++ spin_release(&src_rq->lock.dep_map, _RET_IP_); ++ do_raw_spin_unlock(&src_rq->lock); ++ ++ return 1; ++ } ++ ++ spin_release(&src_rq->lock.dep_map, _RET_IP_); ++ do_raw_spin_unlock(&src_rq->lock); ++ } ++ } while (++affinity_mask < end_mask); ++ ++ return 0; ++} ++#endif ++ ++/* ++ * Timeslices below RESCHED_NS are considered as good as expired as there's no ++ * point rescheduling when there's so little time left. ++ */ ++static inline void check_curr(struct task_struct *p, struct rq *rq) ++{ ++ if (unlikely(rq->idle == p)) ++ return; ++ ++ update_curr(rq, p); ++ ++ if (p->time_slice < RESCHED_NS) ++ time_slice_expired(p, rq); ++} ++ ++static inline struct task_struct * ++choose_next_task(struct rq *rq, int cpu, struct task_struct *prev) ++{ ++ struct task_struct *next; ++ ++ if (unlikely(rq->skip)) { ++ next = rq_runnable_task(rq); ++ if (next == rq->idle) { ++#ifdef CONFIG_SMP ++ if (!take_other_rq_tasks(rq, cpu)) { ++#endif ++ rq->skip = NULL; ++ schedstat_inc(rq->sched_goidle); ++ return next; ++#ifdef CONFIG_SMP ++ } ++ next = rq_runnable_task(rq); ++#endif ++ } ++ rq->skip = NULL; ++#ifdef CONFIG_HIGH_RES_TIMERS ++ hrtick_start(rq, next->time_slice); ++#endif ++ return next; ++ } ++ ++ next = sched_rq_first_task(rq); ++ if (next == rq->idle) { ++#ifdef CONFIG_SMP ++ if (!take_other_rq_tasks(rq, cpu)) { ++#endif ++ schedstat_inc(rq->sched_goidle); ++ /*printk(KERN_INFO "sched: choose_next_task(%d) idle %px\n", cpu, next);*/ ++ return next; ++#ifdef CONFIG_SMP ++ } ++ next = sched_rq_first_task(rq); ++#endif ++ } ++#ifdef CONFIG_HIGH_RES_TIMERS ++ hrtick_start(rq, next->time_slice); ++#endif ++ /*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu, ++ * next);*/ ++ return next; ++} ++ ++/* ++ * schedule() is the main scheduler function. ++ * ++ * The main means of driving the scheduler and thus entering this function are: ++ * ++ * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. ++ * ++ * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return ++ * paths. For example, see arch/x86/entry_64.S. ++ * ++ * To drive preemption between tasks, the scheduler sets the flag in timer ++ * interrupt handler scheduler_tick(). ++ * ++ * 3. Wakeups don't really cause entry into schedule(). They add a ++ * task to the run-queue and that's it. 
++ * ++ * Now, if the new task added to the run-queue preempts the current ++ * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets ++ * called on the nearest possible occasion: ++ * ++ * - If the kernel is preemptible (CONFIG_PREEMPTION=y): ++ * ++ * - in syscall or exception context, at the next outmost ++ * preempt_enable(). (this might be as soon as the wake_up()'s ++ * spin_unlock()!) ++ * ++ * - in IRQ context, return from interrupt-handler to ++ * preemptible context ++ * ++ * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set) ++ * then at the next: ++ * ++ * - cond_resched() call ++ * - explicit schedule() call ++ * - return from syscall or exception to user-space ++ * - return from interrupt-handler to user-space ++ * ++ * WARNING: must be called with preemption disabled! ++ */ ++static void __sched notrace __schedule(bool preempt) ++{ ++ struct task_struct *prev, *next; ++ unsigned long *switch_count; ++ unsigned long prev_state; ++ struct rq *rq; ++ int cpu; ++ ++ cpu = smp_processor_id(); ++ rq = cpu_rq(cpu); ++ prev = rq->curr; ++ ++ schedule_debug(prev, preempt); ++ ++ /* by passing sched_feat(HRTICK) checking which Alt schedule FW doesn't support */ ++ hrtick_clear(rq); ++ ++ local_irq_disable(); ++ rcu_note_context_switch(preempt); ++ ++ /* ++ * Make sure that signal_pending_state()->signal_pending() below ++ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) ++ * done by the caller to avoid the race with signal_wake_up(): ++ * ++ * __set_current_state(@state) signal_wake_up() ++ * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING) ++ * wake_up_state(p, state) ++ * LOCK rq->lock LOCK p->pi_state ++ * smp_mb__after_spinlock() smp_mb__after_spinlock() ++ * if (signal_pending_state()) if (p->state & @state) ++ * ++ * Also, the membarrier system call requires a full memory barrier ++ * after coming from user-space, before storing to rq->curr. ++ */ ++ raw_spin_lock(&rq->lock); ++ smp_mb__after_spinlock(); ++ ++ update_rq_clock(rq); ++ ++ switch_count = &prev->nivcsw; ++ /* ++ * We must load prev->state once (task_struct::state is volatile), such ++ * that: ++ * ++ * - we form a control dependency vs deactivate_task() below. ++ * - ptrace_{,un}freeze_traced() can change ->state underneath us. ++ */ ++ prev_state = prev->state; ++ if (!preempt && prev_state && prev_state == prev->state) { ++ if (signal_pending_state(prev_state, prev)) { ++ prev->state = TASK_RUNNING; ++ } else { ++ prev->sched_contributes_to_load = ++ (prev_state & TASK_UNINTERRUPTIBLE) && ++ !(prev_state & TASK_NOLOAD) && ++ !(prev->flags & PF_FROZEN); ++ ++ if (prev->sched_contributes_to_load) ++ rq->nr_uninterruptible++; ++ ++ /* ++ * __schedule() ttwu() ++ * prev_state = prev->state; if (p->on_rq && ...) ++ * if (prev_state) goto out; ++ * p->on_rq = 0; smp_acquire__after_ctrl_dep(); ++ * p->state = TASK_WAKING ++ * ++ * Where __schedule() and ttwu() have matching control dependencies. ++ * ++ * After this, schedule() must not care about p->state any more. 
++ */ ++ sched_task_deactivate(prev, rq); ++ deactivate_task(prev, rq); ++ ++ if (prev->in_iowait) { ++ atomic_inc(&rq->nr_iowait); ++ delayacct_blkio_start(); ++ } ++ } ++ switch_count = &prev->nvcsw; ++ } ++ ++ check_curr(prev, rq); ++ ++ next = choose_next_task(rq, cpu, prev); ++ clear_tsk_need_resched(prev); ++ clear_preempt_need_resched(); ++ ++ ++ if (likely(prev != next)) { ++ next->last_ran = rq->clock_task; ++ rq->last_ts_switch = rq->clock; ++ ++ rq->nr_switches++; ++ /* ++ * RCU users of rcu_dereference(rq->curr) may not see ++ * changes to task_struct made by pick_next_task(). ++ */ ++ RCU_INIT_POINTER(rq->curr, next); ++ /* ++ * The membarrier system call requires each architecture ++ * to have a full memory barrier after updating ++ * rq->curr, before returning to user-space. ++ * ++ * Here are the schemes providing that barrier on the ++ * various architectures: ++ * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC. ++ * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC. ++ * - finish_lock_switch() for weakly-ordered ++ * architectures where spin_unlock is a full barrier, ++ * - switch_to() for arm64 (weakly-ordered, spin_unlock ++ * is a RELEASE barrier), ++ */ ++ ++*switch_count; ++ ++ psi_sched_switch(prev, next, !task_on_rq_queued(prev)); ++ ++ trace_sched_switch(preempt, prev, next); ++ ++ /* Also unlocks the rq: */ ++ rq = context_switch(rq, prev, next); ++ } else { ++ __balance_callbacks(rq); ++ raw_spin_unlock_irq(&rq->lock); ++ } ++ ++#ifdef CONFIG_SCHED_SMT ++ sg_balance_check(rq); ++#endif ++} ++ ++void __noreturn do_task_dead(void) ++{ ++ /* Causes final put_task_struct in finish_task_switch(): */ ++ set_special_state(TASK_DEAD); ++ ++ /* Tell freezer to ignore us: */ ++ current->flags |= PF_NOFREEZE; ++ ++ __schedule(false); ++ BUG(); ++ ++ /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ ++ for (;;) ++ cpu_relax(); ++} ++ ++static inline void sched_submit_work(struct task_struct *tsk) ++{ ++ unsigned int task_flags; ++ ++ if (!tsk->state) ++ return; ++ ++ task_flags = tsk->flags; ++ /* ++ * If a worker went to sleep, notify and ask workqueue whether ++ * it wants to wake up a task to maintain concurrency. ++ * As this function is called inside the schedule() context, ++ * we disable preemption to avoid it calling schedule() again ++ * in the possible wakeup of a kworker and because wq_worker_sleeping() ++ * requires it. ++ */ ++ if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) { ++ preempt_disable(); ++ if (task_flags & PF_WQ_WORKER) ++ wq_worker_sleeping(tsk); ++ else ++ io_wq_worker_sleeping(tsk); ++ preempt_enable_no_resched(); ++ } ++ ++ if (tsk_is_pi_blocked(tsk)) ++ return; ++ ++ /* ++ * If we are going to sleep and we have plugged IO queued, ++ * make sure to submit it to avoid deadlocks. 
++ */ ++ if (blk_needs_flush_plug(tsk)) ++ blk_schedule_flush_plug(tsk); ++} ++ ++static void sched_update_worker(struct task_struct *tsk) ++{ ++ if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { ++ if (tsk->flags & PF_WQ_WORKER) ++ wq_worker_running(tsk); ++ else ++ io_wq_worker_running(tsk); ++ } ++} ++ ++asmlinkage __visible void __sched schedule(void) ++{ ++ struct task_struct *tsk = current; ++ ++ sched_submit_work(tsk); ++ do { ++ preempt_disable(); ++ __schedule(false); ++ sched_preempt_enable_no_resched(); ++ } while (need_resched()); ++ sched_update_worker(tsk); ++} ++EXPORT_SYMBOL(schedule); ++ ++/* ++ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted ++ * state (have scheduled out non-voluntarily) by making sure that all ++ * tasks have either left the run queue or have gone into user space. ++ * As idle tasks do not do either, they must not ever be preempted ++ * (schedule out non-voluntarily). ++ * ++ * schedule_idle() is similar to schedule_preempt_disable() except that it ++ * never enables preemption because it does not call sched_submit_work(). ++ */ ++void __sched schedule_idle(void) ++{ ++ /* ++ * As this skips calling sched_submit_work(), which the idle task does ++ * regardless because that function is a nop when the task is in a ++ * TASK_RUNNING state, make sure this isn't used someplace that the ++ * current task can be in any other state. Note, idle is always in the ++ * TASK_RUNNING state. ++ */ ++ WARN_ON_ONCE(current->state); ++ do { ++ __schedule(false); ++ } while (need_resched()); ++} ++ ++#if defined(CONFIG_CONTEXT_TRACKING) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) ++asmlinkage __visible void __sched schedule_user(void) ++{ ++ /* ++ * If we come here after a random call to set_need_resched(), ++ * or we have been woken up remotely but the IPI has not yet arrived, ++ * we haven't yet exited the RCU idle mode. Do it here manually until ++ * we find a better solution. ++ * ++ * NB: There are buggy callers of this function. Ideally we ++ * should warn if prev_state != CONTEXT_USER, but that will trigger ++ * too frequently to make sense yet. ++ */ ++ enum ctx_state prev_state = exception_enter(); ++ schedule(); ++ exception_exit(prev_state); ++} ++#endif ++ ++/** ++ * schedule_preempt_disabled - called with preemption disabled ++ * ++ * Returns with preemption disabled. Note: preempt_count must be 1 ++ */ ++void __sched schedule_preempt_disabled(void) ++{ ++ sched_preempt_enable_no_resched(); ++ schedule(); ++ preempt_disable(); ++} ++ ++static void __sched notrace preempt_schedule_common(void) ++{ ++ do { ++ /* ++ * Because the function tracer can trace preempt_count_sub() ++ * and it also uses preempt_enable/disable_notrace(), if ++ * NEED_RESCHED is set, the preempt_enable_notrace() called ++ * by the function tracer will call this function again and ++ * cause infinite recursion. ++ * ++ * Preemption must be disabled here before the function ++ * tracer can trace. Break up preempt_disable() into two ++ * calls. One to disable preemption without fear of being ++ * traced. The other to still record the preemption latency, ++ * which can also be traced by the function tracer. ++ */ ++ preempt_disable_notrace(); ++ preempt_latency_start(1); ++ __schedule(true); ++ preempt_latency_stop(1); ++ preempt_enable_no_resched_notrace(); ++ ++ /* ++ * Check again in case we missed a preemption opportunity ++ * between schedule and now. 
++ */ ++ } while (need_resched()); ++} ++ ++#ifdef CONFIG_PREEMPTION ++/* ++ * This is the entry point to schedule() from in-kernel preemption ++ * off of preempt_enable. ++ */ ++asmlinkage __visible void __sched notrace preempt_schedule(void) ++{ ++ /* ++ * If there is a non-zero preempt_count or interrupts are disabled, ++ * we do not want to preempt the current task. Just return.. ++ */ ++ if (likely(!preemptible())) ++ return; ++ ++ preempt_schedule_common(); ++} ++NOKPROBE_SYMBOL(preempt_schedule); ++EXPORT_SYMBOL(preempt_schedule); ++ ++#ifdef CONFIG_PREEMPT_DYNAMIC ++DEFINE_STATIC_CALL(preempt_schedule, __preempt_schedule_func); ++EXPORT_STATIC_CALL_TRAMP(preempt_schedule); ++#endif ++ ++ ++/** ++ * preempt_schedule_notrace - preempt_schedule called by tracing ++ * ++ * The tracing infrastructure uses preempt_enable_notrace to prevent ++ * recursion and tracing preempt enabling caused by the tracing ++ * infrastructure itself. But as tracing can happen in areas coming ++ * from userspace or just about to enter userspace, a preempt enable ++ * can occur before user_exit() is called. This will cause the scheduler ++ * to be called when the system is still in usermode. ++ * ++ * To prevent this, the preempt_enable_notrace will use this function ++ * instead of preempt_schedule() to exit user context if needed before ++ * calling the scheduler. ++ */ ++asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) ++{ ++ enum ctx_state prev_ctx; ++ ++ if (likely(!preemptible())) ++ return; ++ ++ do { ++ /* ++ * Because the function tracer can trace preempt_count_sub() ++ * and it also uses preempt_enable/disable_notrace(), if ++ * NEED_RESCHED is set, the preempt_enable_notrace() called ++ * by the function tracer will call this function again and ++ * cause infinite recursion. ++ * ++ * Preemption must be disabled here before the function ++ * tracer can trace. Break up preempt_disable() into two ++ * calls. One to disable preemption without fear of being ++ * traced. The other to still record the preemption latency, ++ * which can also be traced by the function tracer. ++ */ ++ preempt_disable_notrace(); ++ preempt_latency_start(1); ++ /* ++ * Needs preempt disabled in case user_exit() is traced ++ * and the tracer calls preempt_enable_notrace() causing ++ * an infinite recursion. 
++ */ ++ prev_ctx = exception_enter(); ++ __schedule(true); ++ exception_exit(prev_ctx); ++ ++ preempt_latency_stop(1); ++ preempt_enable_no_resched_notrace(); ++ } while (need_resched()); ++} ++EXPORT_SYMBOL_GPL(preempt_schedule_notrace); ++ ++#ifdef CONFIG_PREEMPT_DYNAMIC ++DEFINE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func); ++EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace); ++#endif ++ ++#endif /* CONFIG_PREEMPTION */ ++ ++#ifdef CONFIG_PREEMPT_DYNAMIC ++ ++#include ++ ++/* ++ * SC:cond_resched ++ * SC:might_resched ++ * SC:preempt_schedule ++ * SC:preempt_schedule_notrace ++ * SC:irqentry_exit_cond_resched ++ * ++ * ++ * NONE: ++ * cond_resched <- __cond_resched ++ * might_resched <- RET0 ++ * preempt_schedule <- NOP ++ * preempt_schedule_notrace <- NOP ++ * irqentry_exit_cond_resched <- NOP ++ * ++ * VOLUNTARY: ++ * cond_resched <- __cond_resched ++ * might_resched <- __cond_resched ++ * preempt_schedule <- NOP ++ * preempt_schedule_notrace <- NOP ++ * irqentry_exit_cond_resched <- NOP ++ * ++ * FULL: ++ * cond_resched <- RET0 ++ * might_resched <- RET0 ++ * preempt_schedule <- preempt_schedule ++ * preempt_schedule_notrace <- preempt_schedule_notrace ++ * irqentry_exit_cond_resched <- irqentry_exit_cond_resched ++ */ ++ ++enum { ++ preempt_dynamic_none = 0, ++ preempt_dynamic_voluntary, ++ preempt_dynamic_full, ++}; ++ ++static int preempt_dynamic_mode = preempt_dynamic_full; ++ ++static int sched_dynamic_mode(const char *str) ++{ ++ if (!strcmp(str, "none")) ++ return 0; ++ ++ if (!strcmp(str, "voluntary")) ++ return 1; ++ ++ if (!strcmp(str, "full")) ++ return 2; ++ ++ return -1; ++} ++ ++static void sched_dynamic_update(int mode) ++{ ++ /* ++ * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in ++ * the ZERO state, which is invalid. 
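sched_dynamic_update() below retargets the preemption entry points through static calls according to the NONE/VOLUNTARY/FULL matrix documented above. A plain-C sketch of the same idea, using ordinary function pointers as stand-ins for static calls and covering only the cond_resched/might_resched pair; names and return values are illustrative only.

#include <stdio.h>
#include <string.h>

/* Stand-ins for the real __cond_resched and the RET0 stub. */
static int resched_real(void) { return 1; }
static int resched_stub(void) { return 0; }

static int (*cond_resched_fn)(void)  = resched_real;
static int (*might_resched_fn)(void) = resched_stub;

enum { DYN_NONE, DYN_VOLUNTARY, DYN_FULL };

static int dynamic_mode(const char *s)
{
    if (!strcmp(s, "none"))      return DYN_NONE;
    if (!strcmp(s, "voluntary")) return DYN_VOLUNTARY;
    if (!strcmp(s, "full"))      return DYN_FULL;
    return -1;
}

/* Re-point the "calls" following the mode matrix in the comment above. */
static void dynamic_update(int mode)
{
    switch (mode) {
    case DYN_NONE:
        cond_resched_fn  = resched_real;
        might_resched_fn = resched_stub;
        break;
    case DYN_VOLUNTARY:
        cond_resched_fn  = resched_real;
        might_resched_fn = resched_real;
        break;
    case DYN_FULL:
        cond_resched_fn  = resched_stub;
        might_resched_fn = resched_stub;
        break;
    }
}

int main(void)
{
    dynamic_update(dynamic_mode("voluntary"));
    printf("cond=%d might=%d\n", cond_resched_fn(), might_resched_fn());
    return 0;
}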
++ */ ++ static_call_update(cond_resched, __cond_resched); ++ static_call_update(might_resched, __cond_resched); ++ static_call_update(preempt_schedule, __preempt_schedule_func); ++ static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func); ++ static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched); ++ ++ switch (mode) { ++ case preempt_dynamic_none: ++ static_call_update(cond_resched, __cond_resched); ++ static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0); ++ static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL); ++ static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL); ++ static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL); ++ pr_info("Dynamic Preempt: none\n"); ++ break; ++ ++ case preempt_dynamic_voluntary: ++ static_call_update(cond_resched, __cond_resched); ++ static_call_update(might_resched, __cond_resched); ++ static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL); ++ static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL); ++ static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL); ++ pr_info("Dynamic Preempt: voluntary\n"); ++ break; ++ ++ case preempt_dynamic_full: ++ static_call_update(cond_resched, (typeof(&__cond_resched)) __static_call_return0); ++ static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0); ++ static_call_update(preempt_schedule, __preempt_schedule_func); ++ static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func); ++ static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched); ++ pr_info("Dynamic Preempt: full\n"); ++ break; ++ } ++ ++ preempt_dynamic_mode = mode; ++} ++ ++static int __init setup_preempt_mode(char *str) ++{ ++ int mode = sched_dynamic_mode(str); ++ if (mode < 0) { ++ pr_warn("Dynamic Preempt: unsupported mode: %s\n", str); ++ return 1; ++ } ++ ++ sched_dynamic_update(mode); ++ return 0; ++} ++__setup("preempt=", setup_preempt_mode); ++ ++#ifdef CONFIG_SCHED_DEBUG ++ ++static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ char buf[16]; ++ int mode; ++ ++ if (cnt > 15) ++ cnt = 15; ++ ++ if (copy_from_user(&buf, ubuf, cnt)) ++ return -EFAULT; ++ ++ buf[cnt] = 0; ++ mode = sched_dynamic_mode(strstrip(buf)); ++ if (mode < 0) ++ return mode; ++ ++ sched_dynamic_update(mode); ++ ++ *ppos += cnt; ++ ++ return cnt; ++} ++ ++static int sched_dynamic_show(struct seq_file *m, void *v) ++{ ++ static const char * preempt_modes[] = { ++ "none", "voluntary", "full" ++ }; ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) { ++ if (preempt_dynamic_mode == i) ++ seq_puts(m, "("); ++ seq_puts(m, preempt_modes[i]); ++ if (preempt_dynamic_mode == i) ++ seq_puts(m, ")"); ++ ++ seq_puts(m, " "); ++ } ++ ++ seq_puts(m, "\n"); ++ return 0; ++} ++ ++static int sched_dynamic_open(struct inode *inode, struct file *filp) ++{ ++ return single_open(filp, sched_dynamic_show, NULL); ++} ++ ++static const struct file_operations sched_dynamic_fops = { ++ .open = sched_dynamic_open, ++ .write = sched_dynamic_write, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ ++static __init int sched_init_debug_dynamic(void) ++{ ++ debugfs_create_file("sched_preempt", 0644, NULL, NULL, &sched_dynamic_fops); ++ return 0; ++} ++late_initcall(sched_init_debug_dynamic); ++ ++#endif /* 
CONFIG_SCHED_DEBUG */ ++#endif /* CONFIG_PREEMPT_DYNAMIC */ ++ ++ ++/* ++ * This is the entry point to schedule() from kernel preemption ++ * off of irq context. ++ * Note, that this is called and return with irqs disabled. This will ++ * protect us against recursive calling from irq. ++ */ ++asmlinkage __visible void __sched preempt_schedule_irq(void) ++{ ++ enum ctx_state prev_state; ++ ++ /* Catch callers which need to be fixed */ ++ BUG_ON(preempt_count() || !irqs_disabled()); ++ ++ prev_state = exception_enter(); ++ ++ do { ++ preempt_disable(); ++ local_irq_enable(); ++ __schedule(true); ++ local_irq_disable(); ++ sched_preempt_enable_no_resched(); ++ } while (need_resched()); ++ ++ exception_exit(prev_state); ++} ++ ++int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, ++ void *key) ++{ ++ WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC); ++ return try_to_wake_up(curr->private, mode, wake_flags); ++} ++EXPORT_SYMBOL(default_wake_function); ++ ++static inline void check_task_changed(struct rq *rq, struct task_struct *p) ++{ ++ /* Trigger resched if task sched_prio has been modified. */ ++ if (task_on_rq_queued(p) && sched_task_need_requeue(p, rq)) { ++ requeue_task(p, rq); ++ check_preempt_curr(rq); ++ } ++} ++ ++#ifdef CONFIG_RT_MUTEXES ++ ++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) ++{ ++ if (pi_task) ++ prio = min(prio, pi_task->prio); ++ ++ return prio; ++} ++ ++static inline int rt_effective_prio(struct task_struct *p, int prio) ++{ ++ struct task_struct *pi_task = rt_mutex_get_top_task(p); ++ ++ return __rt_effective_prio(pi_task, prio); ++} ++ ++/* ++ * rt_mutex_setprio - set the current priority of a task ++ * @p: task to boost ++ * @pi_task: donor task ++ * ++ * This function changes the 'effective' priority of a task. It does ++ * not touch ->normal_prio like __setscheduler(). ++ * ++ * Used by the rt_mutex code to implement priority inheritance ++ * logic. Call site only calls if the priority of the task changed. ++ */ ++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) ++{ ++ int prio; ++ struct rq *rq; ++ raw_spinlock_t *lock; ++ ++ /* XXX used to be waiter->prio, not waiter->task->prio */ ++ prio = __rt_effective_prio(pi_task, p->normal_prio); ++ ++ /* ++ * If nothing changed; bail early. ++ */ ++ if (p->pi_top_task == pi_task && prio == p->prio) ++ return; ++ ++ rq = __task_access_lock(p, &lock); ++ /* ++ * Set under pi_lock && rq->lock, such that the value can be used under ++ * either lock. ++ * ++ * Note that there is loads of tricky to make this pointer cache work ++ * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to ++ * ensure a task is de-boosted (pi_task is set to NULL) before the ++ * task is allowed to run again (and can exit). This ensures the pointer ++ * points to a blocked task -- which guarantees the task is present. ++ */ ++ p->pi_top_task = pi_task; ++ ++ /* ++ * For FIFO/RR we only need to set prio, if that matches we're done. ++ */ ++ if (prio == p->prio) ++ goto out_unlock; ++ ++ /* ++ * Idle task boosting is a nono in general. There is one ++ * exception, when PREEMPT_RT and NOHZ is active: ++ * ++ * The idle task calls get_next_timer_interrupt() and holds ++ * the timer wheel base->lock on the CPU and another CPU wants ++ * to access the timer (probably to cancel it). 
We can safely ++ * ignore the boosting request, as the idle CPU runs this code ++ * with interrupts disabled and will complete the lock ++ * protected section without being interrupted. So there is no ++ * real need to boost. ++ */ ++ if (unlikely(p == rq->idle)) { ++ WARN_ON(p != rq->curr); ++ WARN_ON(p->pi_blocked_on); ++ goto out_unlock; ++ } ++ ++ trace_sched_pi_setprio(p, pi_task); ++ p->prio = prio; ++ update_task_priodl(p); ++ ++ check_task_changed(rq, p); ++out_unlock: ++ /* Avoid rq from going away on us: */ ++ preempt_disable(); ++ ++ __balance_callbacks(rq); ++ __task_access_unlock(p, lock); ++ ++ preempt_enable(); ++} ++#else ++static inline int rt_effective_prio(struct task_struct *p, int prio) ++{ ++ return prio; ++} ++#endif ++ ++void set_user_nice(struct task_struct *p, long nice) ++{ ++ unsigned long flags; ++ struct rq *rq; ++ raw_spinlock_t *lock; ++ ++ if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) ++ return; ++ /* ++ * We have to be careful, if called from sys_setpriority(), ++ * the task might be in the middle of scheduling on another CPU. ++ */ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ rq = __task_access_lock(p, &lock); ++ ++ p->static_prio = NICE_TO_PRIO(nice); ++ /* ++ * The RT priorities are set via sched_setscheduler(), but we still ++ * allow the 'normal' nice value to be set - but as expected ++ * it won't have any effect on scheduling until the task is ++ * not SCHED_NORMAL/SCHED_BATCH: ++ */ ++ if (task_has_rt_policy(p)) ++ goto out_unlock; ++ ++ p->prio = effective_prio(p); ++ update_task_priodl(p); ++ ++ check_task_changed(rq, p); ++out_unlock: ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++} ++EXPORT_SYMBOL(set_user_nice); ++ ++/* ++ * can_nice - check if a task can reduce its nice value ++ * @p: task ++ * @nice: nice value ++ */ ++int can_nice(const struct task_struct *p, const int nice) ++{ ++ /* Convert nice value [19,-20] to rlimit style value [1,40] */ ++ int nice_rlim = nice_to_rlimit(nice); ++ ++ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || ++ capable(CAP_SYS_NICE)); ++} ++ ++#ifdef __ARCH_WANT_SYS_NICE ++ ++/* ++ * sys_nice - change the priority of the current process. ++ * @increment: priority increment ++ * ++ * sys_setpriority is a more generic, but much slower function that ++ * does similar things. ++ */ ++SYSCALL_DEFINE1(nice, int, increment) ++{ ++ long nice, retval; ++ ++ /* ++ * Setpriority might change our priority at the same moment. ++ * We don't have to worry. Conceptually one call occurs first ++ * and we have a single winner. ++ */ ++ ++ increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); ++ nice = task_nice(current) + increment; ++ ++ nice = clamp_val(nice, MIN_NICE, MAX_NICE); ++ if (increment < 0 && !can_nice(current, nice)) ++ return -EPERM; ++ ++ retval = security_task_setnice(current, nice); ++ if (retval) ++ return retval; ++ ++ set_user_nice(current, nice); ++ return 0; ++} ++ ++#endif ++ ++/** ++ * idle_cpu - is a given CPU idle currently? ++ * @cpu: the processor in question. ++ * ++ * Return: 1 if the CPU is currently idle. 0 otherwise. ++ */ ++int idle_cpu(int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++ if (rq->curr != rq->idle) ++ return 0; ++ ++ if (rq->nr_running) ++ return 0; ++ ++#ifdef CONFIG_SMP ++ if (rq->ttwu_pending) ++ return 0; ++#endif ++ ++ return 1; ++} ++ ++/** ++ * idle_task - return the idle task for a given CPU. ++ * @cpu: the processor in question. ++ * ++ * Return: The idle task for the cpu @cpu. 
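Earlier in this hunk, can_nice() compares against RLIMIT_NICE using the [19, -20] to [1, 40] mapping named in its comment (nice_to_rlimit()). A small sketch of that conversion and its inverse, with the constants written out for illustration:

#include <stdio.h>

#define MAX_NICE  19
#define MIN_NICE -20

/* Map a nice value in [19, -20] to the rlimit-style range [1, 40];
 * illustrative copy of the conversion can_nice() relies on. */
static long nice_to_rlimit(long nice) { return MAX_NICE - nice + 1; }
static long rlimit_to_nice(long prio) { return MAX_NICE - prio + 1; }

int main(void)
{
    for (long n = MIN_NICE; n <= MAX_NICE; n += 39)
        printf("nice %3ld -> rlimit %2ld -> nice %3ld\n",
               n, nice_to_rlimit(n), rlimit_to_nice(nice_to_rlimit(n)));
    return 0;
}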
++ */ ++struct task_struct *idle_task(int cpu) ++{ ++ return cpu_rq(cpu)->idle; ++} ++ ++/** ++ * find_process_by_pid - find a process with a matching PID value. ++ * @pid: the pid in question. ++ * ++ * The task of @pid, if found. %NULL otherwise. ++ */ ++static inline struct task_struct *find_process_by_pid(pid_t pid) ++{ ++ return pid ? find_task_by_vpid(pid) : current; ++} ++ ++/* ++ * sched_setparam() passes in -1 for its policy, to let the functions ++ * it calls know not to change it. ++ */ ++#define SETPARAM_POLICY -1 ++ ++static void __setscheduler_params(struct task_struct *p, ++ const struct sched_attr *attr) ++{ ++ int policy = attr->sched_policy; ++ ++ if (policy == SETPARAM_POLICY) ++ policy = p->policy; ++ ++ p->policy = policy; ++ ++ /* ++ * allow normal nice value to be set, but will not have any ++ * effect on scheduling until the task not SCHED_NORMAL/ ++ * SCHED_BATCH ++ */ ++ p->static_prio = NICE_TO_PRIO(attr->sched_nice); ++ ++ /* ++ * __sched_setscheduler() ensures attr->sched_priority == 0 when ++ * !rt_policy. Always setting this ensures that things like ++ * getparam()/getattr() don't report silly values for !rt tasks. ++ */ ++ p->rt_priority = attr->sched_priority; ++ p->normal_prio = normal_prio(p); ++} ++ ++/* Actually do priority change: must hold rq lock. */ ++static void __setscheduler(struct rq *rq, struct task_struct *p, ++ const struct sched_attr *attr, bool keep_boost) ++{ ++ __setscheduler_params(p, attr); ++ ++ /* ++ * Keep a potential priority boosting if called from ++ * sched_setscheduler(). ++ */ ++ p->prio = normal_prio(p); ++ if (keep_boost) ++ p->prio = rt_effective_prio(p, p->prio); ++ update_task_priodl(p); ++} ++ ++/* ++ * check the target process has a UID that matches the current process's ++ */ ++static bool check_same_owner(struct task_struct *p) ++{ ++ const struct cred *cred = current_cred(), *pcred; ++ bool match; ++ ++ rcu_read_lock(); ++ pcred = __task_cred(p); ++ match = (uid_eq(cred->euid, pcred->euid) || ++ uid_eq(cred->euid, pcred->uid)); ++ rcu_read_unlock(); ++ return match; ++} ++ ++static int __sched_setscheduler(struct task_struct *p, ++ const struct sched_attr *attr, ++ bool user, bool pi) ++{ ++ const struct sched_attr dl_squash_attr = { ++ .size = sizeof(struct sched_attr), ++ .sched_policy = SCHED_FIFO, ++ .sched_nice = 0, ++ .sched_priority = 99, ++ }; ++ int newprio = MAX_RT_PRIO - 1 - attr->sched_priority; ++ int retval, oldpolicy = -1; ++ int policy = attr->sched_policy; ++ struct callback_head *head; ++ unsigned long flags; ++ struct rq *rq; ++ int reset_on_fork; ++ raw_spinlock_t *lock; ++ ++ /* The pi code expects interrupts enabled */ ++ BUG_ON(pi && in_interrupt()); ++ ++ /* ++ * Alt schedule FW supports SCHED_DEADLINE by squash it as prio 0 SCHED_FIFO ++ */ ++ if (unlikely(SCHED_DEADLINE == policy)) { ++ attr = &dl_squash_attr; ++ policy = attr->sched_policy; ++ newprio = MAX_RT_PRIO - 1 - attr->sched_priority; ++ } ++recheck: ++ /* Double check policy once rq lock held */ ++ if (policy < 0) { ++ reset_on_fork = p->sched_reset_on_fork; ++ policy = oldpolicy = p->policy; ++ } else { ++ reset_on_fork = !!(attr->sched_flags & SCHED_RESET_ON_FORK); ++ ++ if (policy > SCHED_IDLE) ++ return -EINVAL; ++ } ++ ++ if (attr->sched_flags & ~(SCHED_FLAG_ALL)) ++ return -EINVAL; ++ ++ /* ++ * Valid priorities for SCHED_FIFO and SCHED_RR are ++ * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL and ++ * SCHED_BATCH and SCHED_IDLE is 0. 
++ */ ++ if (attr->sched_priority < 0 || ++ (p->mm && attr->sched_priority > MAX_RT_PRIO - 1) || ++ (!p->mm && attr->sched_priority > MAX_RT_PRIO - 1)) ++ return -EINVAL; ++ if ((SCHED_RR == policy || SCHED_FIFO == policy) != ++ (attr->sched_priority != 0)) ++ return -EINVAL; ++ ++ /* ++ * Allow unprivileged RT tasks to decrease priority: ++ */ ++ if (user && !capable(CAP_SYS_NICE)) { ++ if (SCHED_FIFO == policy || SCHED_RR == policy) { ++ unsigned long rlim_rtprio = ++ task_rlimit(p, RLIMIT_RTPRIO); ++ ++ /* Can't set/change the rt policy */ ++ if (policy != p->policy && !rlim_rtprio) ++ return -EPERM; ++ ++ /* Can't increase priority */ ++ if (attr->sched_priority > p->rt_priority && ++ attr->sched_priority > rlim_rtprio) ++ return -EPERM; ++ } ++ ++ /* Can't change other user's priorities */ ++ if (!check_same_owner(p)) ++ return -EPERM; ++ ++ /* Normal users shall not reset the sched_reset_on_fork flag */ ++ if (p->sched_reset_on_fork && !reset_on_fork) ++ return -EPERM; ++ } ++ ++ if (user) { ++ retval = security_task_setscheduler(p); ++ if (retval) ++ return retval; ++ } ++ ++ if (pi) ++ cpuset_read_lock(); ++ ++ /* ++ * Make sure no PI-waiters arrive (or leave) while we are ++ * changing the priority of the task: ++ */ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ ++ /* ++ * To be able to change p->policy safely, task_access_lock() ++ * must be called. ++ * IF use task_access_lock() here: ++ * For the task p which is not running, reading rq->stop is ++ * racy but acceptable as ->stop doesn't change much. ++ * An enhancemnet can be made to read rq->stop saftly. ++ */ ++ rq = __task_access_lock(p, &lock); ++ ++ /* ++ * Changing the policy of the stop threads its a very bad idea ++ */ ++ if (p == rq->stop) { ++ retval = -EINVAL; ++ goto unlock; ++ } ++ ++ /* ++ * If not changing anything there's no need to proceed further: ++ */ ++ if (unlikely(policy == p->policy)) { ++ if (rt_policy(policy) && attr->sched_priority != p->rt_priority) ++ goto change; ++ if (!rt_policy(policy) && ++ NICE_TO_PRIO(attr->sched_nice) != p->static_prio) ++ goto change; ++ ++ p->sched_reset_on_fork = reset_on_fork; ++ retval = 0; ++ goto unlock; ++ } ++change: ++ ++ /* Re-check policy now with rq lock held */ ++ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { ++ policy = oldpolicy = -1; ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ if (pi) ++ cpuset_read_unlock(); ++ goto recheck; ++ } ++ ++ p->sched_reset_on_fork = reset_on_fork; ++ ++ if (pi) { ++ /* ++ * Take priority boosted tasks into account. If the new ++ * effective priority is unchanged, we just store the new ++ * normal parameters and do not touch the scheduler class and ++ * the runqueue. This will be done when the task deboost ++ * itself. 
++ */ ++ if (rt_effective_prio(p, newprio) == p->prio) { ++ __setscheduler_params(p, attr); ++ retval = 0; ++ goto unlock; ++ } ++ } ++ ++ __setscheduler(rq, p, attr, pi); ++ ++ check_task_changed(rq, p); ++ ++ /* Avoid rq from going away on us: */ ++ preempt_disable(); ++ head = splice_balance_callbacks(rq); ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ ++ if (pi) { ++ cpuset_read_unlock(); ++ rt_mutex_adjust_pi(p); ++ } ++ ++ /* Run balance callbacks after we've adjusted the PI chain: */ ++ balance_callbacks(rq, head); ++ preempt_enable(); ++ ++ return 0; ++ ++unlock: ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ if (pi) ++ cpuset_read_unlock(); ++ return retval; ++} ++ ++static int _sched_setscheduler(struct task_struct *p, int policy, ++ const struct sched_param *param, bool check) ++{ ++ struct sched_attr attr = { ++ .sched_policy = policy, ++ .sched_priority = param->sched_priority, ++ .sched_nice = PRIO_TO_NICE(p->static_prio), ++ }; ++ ++ /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ ++ if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { ++ attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; ++ policy &= ~SCHED_RESET_ON_FORK; ++ attr.sched_policy = policy; ++ } ++ ++ return __sched_setscheduler(p, &attr, check, true); ++} ++ ++/** ++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. ++ * @p: the task in question. ++ * @policy: new policy. ++ * @param: structure containing the new RT priority. ++ * ++ * Use sched_set_fifo(), read its comment. ++ * ++ * Return: 0 on success. An error code otherwise. ++ * ++ * NOTE that the task may be already dead. ++ */ ++int sched_setscheduler(struct task_struct *p, int policy, ++ const struct sched_param *param) ++{ ++ return _sched_setscheduler(p, policy, param, true); ++} ++ ++int sched_setattr(struct task_struct *p, const struct sched_attr *attr) ++{ ++ return __sched_setscheduler(p, attr, true, true); ++} ++ ++int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) ++{ ++ return __sched_setscheduler(p, attr, false, true); ++} ++ ++/** ++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. ++ * @p: the task in question. ++ * @policy: new policy. ++ * @param: structure containing the new RT priority. ++ * ++ * Just like sched_setscheduler, only don't bother checking if the ++ * current context has permission. For example, this is needed in ++ * stop_machine(): we create temporary high priority worker threads, ++ * but our caller might not have that capability. ++ * ++ * Return: 0 on success. An error code otherwise. ++ */ ++int sched_setscheduler_nocheck(struct task_struct *p, int policy, ++ const struct sched_param *param) ++{ ++ return _sched_setscheduler(p, policy, param, false); ++} ++ ++/* ++ * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally ++ * incapable of resource management, which is the one thing an OS really should ++ * be doing. ++ * ++ * This is of course the reason it is limited to privileged users only. ++ * ++ * Worse still; it is fundamentally impossible to compose static priority ++ * workloads. You cannot take two correctly working static prio workloads ++ * and smash them together and still expect them to work. 
++ * ++ * For this reason 'all' FIFO tasks the kernel creates are basically at: ++ * ++ * MAX_RT_PRIO / 2 ++ * ++ * The administrator _MUST_ configure the system, the kernel simply doesn't ++ * know enough information to make a sensible choice. ++ */ ++void sched_set_fifo(struct task_struct *p) ++{ ++ struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 }; ++ WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); ++} ++EXPORT_SYMBOL_GPL(sched_set_fifo); ++ ++/* ++ * For when you don't much care about FIFO, but want to be above SCHED_NORMAL. ++ */ ++void sched_set_fifo_low(struct task_struct *p) ++{ ++ struct sched_param sp = { .sched_priority = 1 }; ++ WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); ++} ++EXPORT_SYMBOL_GPL(sched_set_fifo_low); ++ ++void sched_set_normal(struct task_struct *p, int nice) ++{ ++ struct sched_attr attr = { ++ .sched_policy = SCHED_NORMAL, ++ .sched_nice = nice, ++ }; ++ WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0); ++} ++EXPORT_SYMBOL_GPL(sched_set_normal); ++ ++static int ++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) ++{ ++ struct sched_param lparam; ++ struct task_struct *p; ++ int retval; ++ ++ if (!param || pid < 0) ++ return -EINVAL; ++ if (copy_from_user(&lparam, param, sizeof(struct sched_param))) ++ return -EFAULT; ++ ++ rcu_read_lock(); ++ retval = -ESRCH; ++ p = find_process_by_pid(pid); ++ if (likely(p)) ++ get_task_struct(p); ++ rcu_read_unlock(); ++ ++ if (likely(p)) { ++ retval = sched_setscheduler(p, policy, &lparam); ++ put_task_struct(p); ++ } ++ ++ return retval; ++} ++ ++/* ++ * Mimics kernel/events/core.c perf_copy_attr(). ++ */ ++static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr) ++{ ++ u32 size; ++ int ret; ++ ++ /* Zero the full structure, so that a short copy will be nice: */ ++ memset(attr, 0, sizeof(*attr)); ++ ++ ret = get_user(size, &uattr->size); ++ if (ret) ++ return ret; ++ ++ /* ABI compatibility quirk: */ ++ if (!size) ++ size = SCHED_ATTR_SIZE_VER0; ++ ++ if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE) ++ goto err_size; ++ ++ ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size); ++ if (ret) { ++ if (ret == -E2BIG) ++ goto err_size; ++ return ret; ++ } ++ ++ /* ++ * XXX: Do we want to be lenient like existing syscalls; or do we want ++ * to be strict and return an error on out-of-bounds values? ++ */ ++ attr->sched_nice = clamp(attr->sched_nice, -20, 19); ++ ++ /* sched/core.c uses zero here but we already know ret is zero */ ++ return 0; ++ ++err_size: ++ put_user(sizeof(*attr), &uattr->size); ++ return -E2BIG; ++} ++ ++/** ++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority ++ * @pid: the pid in question. ++ * @policy: new policy. ++ * ++ * Return: 0 on success. An error code otherwise. ++ * @param: structure containing the new RT priority. ++ */ ++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) ++{ ++ if (policy < 0) ++ return -EINVAL; ++ ++ return do_sched_setscheduler(pid, policy, param); ++} ++ ++/** ++ * sys_sched_setparam - set/change the RT priority of a thread ++ * @pid: the pid in question. ++ * @param: structure containing the new RT priority. ++ * ++ * Return: 0 on success. An error code otherwise. 
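++ *
++ * Illustrative only (not part of this patch): from user space, via the
++ * C library wrapper, bumping the calling thread's RT priority would
++ * look roughly like
++ *
++ *	struct sched_param sp = { .sched_priority = 10 };
++ *	sched_setparam(0, &sp);		pid 0 means the calling thread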
++ */ ++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) ++{ ++ return do_sched_setscheduler(pid, SETPARAM_POLICY, param); ++} ++ ++/** ++ * sys_sched_setattr - same as above, but with extended sched_attr ++ * @pid: the pid in question. ++ * @uattr: structure containing the extended parameters. ++ */ ++SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, ++ unsigned int, flags) ++{ ++ struct sched_attr attr; ++ struct task_struct *p; ++ int retval; ++ ++ if (!uattr || pid < 0 || flags) ++ return -EINVAL; ++ ++ retval = sched_copy_attr(uattr, &attr); ++ if (retval) ++ return retval; ++ ++ if ((int)attr.sched_policy < 0) ++ return -EINVAL; ++ ++ rcu_read_lock(); ++ retval = -ESRCH; ++ p = find_process_by_pid(pid); ++ if (likely(p)) ++ get_task_struct(p); ++ rcu_read_unlock(); ++ ++ if (likely(p)) { ++ retval = sched_setattr(p, &attr); ++ put_task_struct(p); ++ } ++ ++ return retval; ++} ++ ++/** ++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread ++ * @pid: the pid in question. ++ * ++ * Return: On success, the policy of the thread. Otherwise, a negative error ++ * code. ++ */ ++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) ++{ ++ struct task_struct *p; ++ int retval = -EINVAL; ++ ++ if (pid < 0) ++ goto out_nounlock; ++ ++ retval = -ESRCH; ++ rcu_read_lock(); ++ p = find_process_by_pid(pid); ++ if (p) { ++ retval = security_task_getscheduler(p); ++ if (!retval) ++ retval = p->policy; ++ } ++ rcu_read_unlock(); ++ ++out_nounlock: ++ return retval; ++} ++ ++/** ++ * sys_sched_getscheduler - get the RT priority of a thread ++ * @pid: the pid in question. ++ * @param: structure containing the RT priority. ++ * ++ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error ++ * code. ++ */ ++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) ++{ ++ struct sched_param lp = { .sched_priority = 0 }; ++ struct task_struct *p; ++ int retval = -EINVAL; ++ ++ if (!param || pid < 0) ++ goto out_nounlock; ++ ++ rcu_read_lock(); ++ p = find_process_by_pid(pid); ++ retval = -ESRCH; ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ if (task_has_rt_policy(p)) ++ lp.sched_priority = p->rt_priority; ++ rcu_read_unlock(); ++ ++ /* ++ * This one might sleep, we cannot do it with a spinlock held ... ++ */ ++ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; ++ ++out_nounlock: ++ return retval; ++ ++out_unlock: ++ rcu_read_unlock(); ++ return retval; ++} ++ ++/* ++ * Copy the kernel size attribute structure (which might be larger ++ * than what user-space knows about) to user-space. ++ * ++ * Note that all cases are valid: user-space buffer can be larger or ++ * smaller than the kernel-space buffer. The usual case is that both ++ * have the same size. ++ */ ++static int ++sched_attr_copy_to_user(struct sched_attr __user *uattr, ++ struct sched_attr *kattr, ++ unsigned int usize) ++{ ++ unsigned int ksize = sizeof(*kattr); ++ ++ if (!access_ok(uattr, usize)) ++ return -EFAULT; ++ ++ /* ++ * sched_getattr() ABI forwards and backwards compatibility: ++ * ++ * If usize == ksize then we just copy everything to user-space and all is good. ++ * ++ * If usize < ksize then we only copy as much as user-space has space for, ++ * this keeps ABI compatibility as well. We skip the rest. ++ * ++ * If usize > ksize then user-space is using a newer version of the ABI, ++ * which part the kernel doesn't know about. 
Just ignore it - tooling can ++ * detect the kernel's knowledge of attributes from the attr->size value ++ * which is set to ksize in this case. ++ */ ++ kattr->size = min(usize, ksize); ++ ++ if (copy_to_user(uattr, kattr, kattr->size)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++/** ++ * sys_sched_getattr - similar to sched_getparam, but with sched_attr ++ * @pid: the pid in question. ++ * @uattr: structure containing the extended parameters. ++ * @usize: sizeof(attr) for fwd/bwd comp. ++ * @flags: for future extension. ++ */ ++SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, ++ unsigned int, usize, unsigned int, flags) ++{ ++ struct sched_attr kattr = { }; ++ struct task_struct *p; ++ int retval; ++ ++ if (!uattr || pid < 0 || usize > PAGE_SIZE || ++ usize < SCHED_ATTR_SIZE_VER0 || flags) ++ return -EINVAL; ++ ++ rcu_read_lock(); ++ p = find_process_by_pid(pid); ++ retval = -ESRCH; ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ kattr.sched_policy = p->policy; ++ if (p->sched_reset_on_fork) ++ kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; ++ if (task_has_rt_policy(p)) ++ kattr.sched_priority = p->rt_priority; ++ else ++ kattr.sched_nice = task_nice(p); ++ ++#ifdef CONFIG_UCLAMP_TASK ++ kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; ++ kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; ++#endif ++ ++ rcu_read_unlock(); ++ ++ return sched_attr_copy_to_user(uattr, &kattr, usize); ++ ++out_unlock: ++ rcu_read_unlock(); ++ return retval; ++} ++ ++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) ++{ ++ cpumask_var_t cpus_allowed, new_mask; ++ struct task_struct *p; ++ int retval; ++ ++ rcu_read_lock(); ++ ++ p = find_process_by_pid(pid); ++ if (!p) { ++ rcu_read_unlock(); ++ return -ESRCH; ++ } ++ ++ /* Prevent p going away */ ++ get_task_struct(p); ++ rcu_read_unlock(); ++ ++ if (p->flags & PF_NO_SETAFFINITY) { ++ retval = -EINVAL; ++ goto out_put_task; ++ } ++ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { ++ retval = -ENOMEM; ++ goto out_put_task; ++ } ++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { ++ retval = -ENOMEM; ++ goto out_free_cpus_allowed; ++ } ++ retval = -EPERM; ++ if (!check_same_owner(p)) { ++ rcu_read_lock(); ++ if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { ++ rcu_read_unlock(); ++ goto out_free_new_mask; ++ } ++ rcu_read_unlock(); ++ } ++ ++ retval = security_task_setscheduler(p); ++ if (retval) ++ goto out_free_new_mask; ++ ++ cpuset_cpus_allowed(p, cpus_allowed); ++ cpumask_and(new_mask, in_mask, cpus_allowed); ++ ++again: ++ retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK); ++ ++ if (!retval) { ++ cpuset_cpus_allowed(p, cpus_allowed); ++ if (!cpumask_subset(new_mask, cpus_allowed)) { ++ /* ++ * We must have raced with a concurrent cpuset ++ * update. Just reset the cpus_allowed to the ++ * cpuset's cpus_allowed ++ */ ++ cpumask_copy(new_mask, cpus_allowed); ++ goto again; ++ } ++ } ++out_free_new_mask: ++ free_cpumask_var(new_mask); ++out_free_cpus_allowed: ++ free_cpumask_var(cpus_allowed); ++out_put_task: ++ put_task_struct(p); ++ return retval; ++} ++ ++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, ++ struct cpumask *new_mask) ++{ ++ if (len < cpumask_size()) ++ cpumask_clear(new_mask); ++ else if (len > cpumask_size()) ++ len = cpumask_size(); ++ ++ return copy_from_user(new_mask, user_mask_ptr, len) ? 
-EFAULT : 0; ++} ++ ++/** ++ * sys_sched_setaffinity - set the CPU affinity of a process ++ * @pid: pid of the process ++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr ++ * @user_mask_ptr: user-space pointer to the new CPU mask ++ * ++ * Return: 0 on success. An error code otherwise. ++ */ ++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, ++ unsigned long __user *, user_mask_ptr) ++{ ++ cpumask_var_t new_mask; ++ int retval; ++ ++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); ++ if (retval == 0) ++ retval = sched_setaffinity(pid, new_mask); ++ free_cpumask_var(new_mask); ++ return retval; ++} ++ ++long sched_getaffinity(pid_t pid, cpumask_t *mask) ++{ ++ struct task_struct *p; ++ raw_spinlock_t *lock; ++ unsigned long flags; ++ int retval; ++ ++ rcu_read_lock(); ++ ++ retval = -ESRCH; ++ p = find_process_by_pid(pid); ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ task_access_lock_irqsave(p, &lock, &flags); ++ cpumask_and(mask, &p->cpus_mask, cpu_active_mask); ++ task_access_unlock_irqrestore(p, lock, &flags); ++ ++out_unlock: ++ rcu_read_unlock(); ++ ++ return retval; ++} ++ ++/** ++ * sys_sched_getaffinity - get the CPU affinity of a process ++ * @pid: pid of the process ++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr ++ * @user_mask_ptr: user-space pointer to hold the current CPU mask ++ * ++ * Return: size of CPU mask copied to user_mask_ptr on success. An ++ * error code otherwise. ++ */ ++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, ++ unsigned long __user *, user_mask_ptr) ++{ ++ int ret; ++ cpumask_var_t mask; ++ ++ if ((len * BITS_PER_BYTE) < nr_cpu_ids) ++ return -EINVAL; ++ if (len & (sizeof(unsigned long)-1)) ++ return -EINVAL; ++ ++ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ ret = sched_getaffinity(pid, mask); ++ if (ret == 0) { ++ unsigned int retlen = min_t(size_t, len, cpumask_size()); ++ ++ if (copy_to_user(user_mask_ptr, mask, retlen)) ++ ret = -EFAULT; ++ else ++ ret = retlen; ++ } ++ free_cpumask_var(mask); ++ ++ return ret; ++} ++ ++static void do_sched_yield(void) ++{ ++ struct rq *rq; ++ struct rq_flags rf; ++ ++ if (!sched_yield_type) ++ return; ++ ++ rq = this_rq_lock_irq(&rf); ++ ++ schedstat_inc(rq->yld_count); ++ ++ if (1 == sched_yield_type) { ++ if (!rt_task(current)) ++ do_sched_yield_type_1(current, rq); ++ } else if (2 == sched_yield_type) { ++ if (rq->nr_running > 1) ++ rq->skip = current; ++ } ++ ++ preempt_disable(); ++ raw_spin_unlock_irq(&rq->lock); ++ sched_preempt_enable_no_resched(); ++ ++ schedule(); ++} ++ ++/** ++ * sys_sched_yield - yield the current processor to other threads. ++ * ++ * This function yields the current CPU to other tasks. If there are no ++ * other threads running on this CPU then this function will return. ++ * ++ * Return: 0. 
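++ *
++ * Illustrative only (not part of this patch): user space normally
++ * reaches this as a plain
++ *
++ *	sched_yield();
++ *
++ * With this scheduler the actual effect is governed by the
++ * sched_yield_type knob handled in do_sched_yield() above: 0 makes the
++ * call a no-op, 1 applies the scheduler-specific yield to non-RT tasks,
++ * and 2 marks the task as rq->skip so it is passed over once.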
++ */ ++SYSCALL_DEFINE0(sched_yield) ++{ ++ do_sched_yield(); ++ return 0; ++} ++ ++#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) ++int __sched __cond_resched(void) ++{ ++ if (should_resched(0)) { ++ preempt_schedule_common(); ++ return 1; ++ } ++#ifndef CONFIG_PREEMPT_RCU ++ rcu_all_qs(); ++#endif ++ return 0; ++} ++EXPORT_SYMBOL(__cond_resched); ++#endif ++ ++#ifdef CONFIG_PREEMPT_DYNAMIC ++DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched); ++EXPORT_STATIC_CALL_TRAMP(cond_resched); ++ ++DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched); ++EXPORT_STATIC_CALL_TRAMP(might_resched); ++#endif ++ ++/* ++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock, ++ * call schedule, and on return reacquire the lock. ++ * ++ * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level ++ * operations here to prevent schedule() from being called twice (once via ++ * spin_unlock(), once by hand). ++ */ ++int __cond_resched_lock(spinlock_t *lock) ++{ ++ int resched = should_resched(PREEMPT_LOCK_OFFSET); ++ int ret = 0; ++ ++ lockdep_assert_held(lock); ++ ++ if (spin_needbreak(lock) || resched) { ++ spin_unlock(lock); ++ if (resched) ++ preempt_schedule_common(); ++ else ++ cpu_relax(); ++ ret = 1; ++ spin_lock(lock); ++ } ++ return ret; ++} ++EXPORT_SYMBOL(__cond_resched_lock); ++ ++int __cond_resched_rwlock_read(rwlock_t *lock) ++{ ++ int resched = should_resched(PREEMPT_LOCK_OFFSET); ++ int ret = 0; ++ ++ lockdep_assert_held_read(lock); ++ ++ if (rwlock_needbreak(lock) || resched) { ++ read_unlock(lock); ++ if (resched) ++ preempt_schedule_common(); ++ else ++ cpu_relax(); ++ ret = 1; ++ read_lock(lock); ++ } ++ return ret; ++} ++EXPORT_SYMBOL(__cond_resched_rwlock_read); ++ ++int __cond_resched_rwlock_write(rwlock_t *lock) ++{ ++ int resched = should_resched(PREEMPT_LOCK_OFFSET); ++ int ret = 0; ++ ++ lockdep_assert_held_write(lock); ++ ++ if (rwlock_needbreak(lock) || resched) { ++ write_unlock(lock); ++ if (resched) ++ preempt_schedule_common(); ++ else ++ cpu_relax(); ++ ret = 1; ++ write_lock(lock); ++ } ++ return ret; ++} ++EXPORT_SYMBOL(__cond_resched_rwlock_write); ++ ++/** ++ * yield - yield the current processor to other threads. ++ * ++ * Do not ever use this function, there's a 99% chance you're doing it wrong. ++ * ++ * The scheduler is at all times free to pick the calling task as the most ++ * eligible task to run, if removing the yield() call from your code breaks ++ * it, it's already broken. ++ * ++ * Typical broken usage is: ++ * ++ * while (!event) ++ * yield(); ++ * ++ * where one assumes that yield() will let 'the other' process run that will ++ * make event true. If the current task is a SCHED_FIFO task that will never ++ * happen. Never use yield() as a progress guarantee!! ++ * ++ * If you want to use yield() to wait for something, use wait_event(). ++ * If you want to use yield() to be 'nice' for others, use cond_resched(). ++ * If you still want to use yield(), do not! ++ */ ++void __sched yield(void) ++{ ++ set_current_state(TASK_RUNNING); ++ do_sched_yield(); ++} ++EXPORT_SYMBOL(yield); ++ ++/** ++ * yield_to - yield the current processor to another thread in ++ * your thread group, or accelerate that thread toward the ++ * processor it's on. ++ * @p: target task ++ * @preempt: whether task preemption is allowed or not ++ * ++ * It's the caller's job to ensure that the target task struct ++ * can't go away on us before we can do any checks. ++ * ++ * In Alt schedule FW, yield_to is not supported. 
++ * ++ * Return: ++ * true (>0) if we indeed boosted the target task. ++ * false (0) if we failed to boost the target. ++ * -ESRCH if there's no task to yield to. ++ */ ++int __sched yield_to(struct task_struct *p, bool preempt) ++{ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(yield_to); ++ ++int io_schedule_prepare(void) ++{ ++ int old_iowait = current->in_iowait; ++ ++ current->in_iowait = 1; ++ blk_schedule_flush_plug(current); ++ ++ return old_iowait; ++} ++ ++void io_schedule_finish(int token) ++{ ++ current->in_iowait = token; ++} ++ ++/* ++ * This task is about to go to sleep on IO. Increment rq->nr_iowait so ++ * that process accounting knows that this is a task in IO wait state. ++ * ++ * But don't do that if it is a deliberate, throttling IO wait (this task ++ * has set its backing_dev_info: the queue against which it should throttle) ++ */ ++ ++long __sched io_schedule_timeout(long timeout) ++{ ++ int token; ++ long ret; ++ ++ token = io_schedule_prepare(); ++ ret = schedule_timeout(timeout); ++ io_schedule_finish(token); ++ ++ return ret; ++} ++EXPORT_SYMBOL(io_schedule_timeout); ++ ++void __sched io_schedule(void) ++{ ++ int token; ++ ++ token = io_schedule_prepare(); ++ schedule(); ++ io_schedule_finish(token); ++} ++EXPORT_SYMBOL(io_schedule); ++ ++/** ++ * sys_sched_get_priority_max - return maximum RT priority. ++ * @policy: scheduling class. ++ * ++ * Return: On success, this syscall returns the maximum ++ * rt_priority that can be used by a given scheduling class. ++ * On failure, a negative error code is returned. ++ */ ++SYSCALL_DEFINE1(sched_get_priority_max, int, policy) ++{ ++ int ret = -EINVAL; ++ ++ switch (policy) { ++ case SCHED_FIFO: ++ case SCHED_RR: ++ ret = MAX_RT_PRIO - 1; ++ break; ++ case SCHED_NORMAL: ++ case SCHED_BATCH: ++ case SCHED_IDLE: ++ ret = 0; ++ break; ++ } ++ return ret; ++} ++ ++/** ++ * sys_sched_get_priority_min - return minimum RT priority. ++ * @policy: scheduling class. ++ * ++ * Return: On success, this syscall returns the minimum ++ * rt_priority that can be used by a given scheduling class. ++ * On failure, a negative error code is returned. ++ */ ++SYSCALL_DEFINE1(sched_get_priority_min, int, policy) ++{ ++ int ret = -EINVAL; ++ ++ switch (policy) { ++ case SCHED_FIFO: ++ case SCHED_RR: ++ ret = 1; ++ break; ++ case SCHED_NORMAL: ++ case SCHED_BATCH: ++ case SCHED_IDLE: ++ ret = 0; ++ break; ++ } ++ return ret; ++} ++ ++static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) ++{ ++ struct task_struct *p; ++ int retval; ++ ++ alt_sched_debug(); ++ ++ if (pid < 0) ++ return -EINVAL; ++ ++ retval = -ESRCH; ++ rcu_read_lock(); ++ p = find_process_by_pid(pid); ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ rcu_read_unlock(); ++ ++ *t = ns_to_timespec64(sched_timeslice_ns); ++ return 0; ++ ++out_unlock: ++ rcu_read_unlock(); ++ return retval; ++} ++ ++/** ++ * sys_sched_rr_get_interval - return the default timeslice of a process. ++ * @pid: pid of the process. ++ * @interval: userspace pointer to the timeslice value. ++ * ++ * ++ * Return: On success, 0 and the timeslice is in @interval. Otherwise, ++ * an error code. 
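++ *
++ * Rough user-space sketch (not part of this patch, using the C library
++ * wrapper):
++ *
++ *	struct timespec ts;
++ *	sched_rr_get_interval(0, &ts);
++ *
++ * Note that under this scheduler every task reports the same fixed
++ * timeslice, sched_timeslice_ns, rather than a per-policy quantum.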
++ */ ++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, ++ struct __kernel_timespec __user *, interval) ++{ ++ struct timespec64 t; ++ int retval = sched_rr_get_interval(pid, &t); ++ ++ if (retval == 0) ++ retval = put_timespec64(&t, interval); ++ ++ return retval; ++} ++ ++#ifdef CONFIG_COMPAT_32BIT_TIME ++SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid, ++ struct old_timespec32 __user *, interval) ++{ ++ struct timespec64 t; ++ int retval = sched_rr_get_interval(pid, &t); ++ ++ if (retval == 0) ++ retval = put_old_timespec32(&t, interval); ++ return retval; ++} ++#endif ++ ++void sched_show_task(struct task_struct *p) ++{ ++ unsigned long free = 0; ++ int ppid; ++ ++ if (!try_get_task_stack(p)) ++ return; ++ ++ pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p)); ++ ++ if (p->state == TASK_RUNNING) ++ pr_cont(" running task "); ++#ifdef CONFIG_DEBUG_STACK_USAGE ++ free = stack_not_used(p); ++#endif ++ ppid = 0; ++ rcu_read_lock(); ++ if (pid_alive(p)) ++ ppid = task_pid_nr(rcu_dereference(p->real_parent)); ++ rcu_read_unlock(); ++ pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n", ++ free, task_pid_nr(p), ppid, ++ (unsigned long)task_thread_info(p)->flags); ++ ++ print_worker_info(KERN_INFO, p); ++ print_stop_info(KERN_INFO, p); ++ show_stack(p, NULL, KERN_INFO); ++ put_task_stack(p); ++} ++EXPORT_SYMBOL_GPL(sched_show_task); ++ ++static inline bool ++state_filter_match(unsigned long state_filter, struct task_struct *p) ++{ ++ /* no filter, everything matches */ ++ if (!state_filter) ++ return true; ++ ++ /* filter, but doesn't match */ ++ if (!(p->state & state_filter)) ++ return false; ++ ++ /* ++ * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows ++ * TASK_KILLABLE). ++ */ ++ if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE) ++ return false; ++ ++ return true; ++} ++ ++ ++void show_state_filter(unsigned long state_filter) ++{ ++ struct task_struct *g, *p; ++ ++ rcu_read_lock(); ++ for_each_process_thread(g, p) { ++ /* ++ * reset the NMI-timeout, listing all files on a slow ++ * console might take a lot of time: ++ * Also, reset softlockup watchdogs on all CPUs, because ++ * another CPU might be blocked waiting for us to process ++ * an IPI. ++ */ ++ touch_nmi_watchdog(); ++ touch_all_softlockup_watchdogs(); ++ if (state_filter_match(state_filter, p)) ++ sched_show_task(p); ++ } ++ ++#ifdef CONFIG_SCHED_DEBUG ++ /* TODO: Alt schedule FW should support this ++ if (!state_filter) ++ sysrq_sched_debug_show(); ++ */ ++#endif ++ rcu_read_unlock(); ++ /* ++ * Only show locks if all tasks are dumped: ++ */ ++ if (!state_filter) ++ debug_show_all_locks(); ++} ++ ++void dump_cpu_task(int cpu) ++{ ++ pr_info("Task dump for CPU %d:\n", cpu); ++ sched_show_task(cpu_curr(cpu)); ++} ++ ++/** ++ * init_idle - set up an idle thread for a given CPU ++ * @idle: task in question ++ * @cpu: CPU the idle task belongs to ++ * ++ * NOTE: this function does not set the idle thread's NEED_RESCHED ++ * flag, to make booting more robust. 
++ */ ++void init_idle(struct task_struct *idle, int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ __sched_fork(0, idle); ++ ++ raw_spin_lock_irqsave(&idle->pi_lock, flags); ++ raw_spin_lock(&rq->lock); ++ update_rq_clock(rq); ++ ++ idle->last_ran = rq->clock_task; ++ idle->state = TASK_RUNNING; ++ idle->flags |= PF_IDLE; ++ sched_queue_init_idle(rq, idle); ++ ++ scs_task_reset(idle); ++ kasan_unpoison_task_stack(idle); ++ ++#ifdef CONFIG_SMP ++ /* ++ * It's possible that init_idle() gets called multiple times on a task, ++ * in that case do_set_cpus_allowed() will not do the right thing. ++ * ++ * And since this is boot we can forgo the serialisation. ++ */ ++ set_cpus_allowed_common(idle, cpumask_of(cpu)); ++#endif ++ ++ /* Silence PROVE_RCU */ ++ rcu_read_lock(); ++ __set_task_cpu(idle, cpu); ++ rcu_read_unlock(); ++ ++ rq->idle = idle; ++ rcu_assign_pointer(rq->curr, idle); ++ idle->on_cpu = 1; ++ ++ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock_irqrestore(&idle->pi_lock, flags); ++ ++ /* Set the preempt count _outside_ the spinlocks! */ ++ init_idle_preempt_count(idle, cpu); ++ ++ ftrace_graph_init_idle_task(idle, cpu); ++ vtime_init_idle(idle, cpu); ++#ifdef CONFIG_SMP ++ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); ++#endif ++} ++ ++#ifdef CONFIG_SMP ++ ++int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur, ++ const struct cpumask __maybe_unused *trial) ++{ ++ return 1; ++} ++ ++int task_can_attach(struct task_struct *p, ++ const struct cpumask *cs_cpus_allowed) ++{ ++ int ret = 0; ++ ++ /* ++ * Kthreads which disallow setaffinity shouldn't be moved ++ * to a new cpuset; we don't want to change their CPU ++ * affinity and isolating such threads by their set of ++ * allowed nodes is unnecessary. Thus, cpusets are not ++ * applicable for such threads. This prevents checking for ++ * success of set_cpus_allowed_ptr() on all attached tasks ++ * before cpus_mask may be changed. ++ */ ++ if (p->flags & PF_NO_SETAFFINITY) ++ ret = -EINVAL; ++ ++ return ret; ++} ++ ++bool sched_smp_initialized __read_mostly; ++ ++#ifdef CONFIG_HOTPLUG_CPU ++/* ++ * Ensures that the idle task is using init_mm right before its CPU goes ++ * offline. ++ */ ++void idle_task_exit(void) ++{ ++ struct mm_struct *mm = current->active_mm; ++ ++ BUG_ON(current != this_rq()->idle); ++ ++ if (mm != &init_mm) { ++ switch_mm(mm, &init_mm, current); ++ finish_arch_post_lock_switch(); ++ } ++ ++ /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ ++} ++ ++static int __balance_push_cpu_stop(void *arg) ++{ ++ struct task_struct *p = arg; ++ struct rq *rq = this_rq(); ++ struct rq_flags rf; ++ int cpu; ++ ++ raw_spin_lock_irq(&p->pi_lock); ++ rq_lock(rq, &rf); ++ ++ update_rq_clock(rq); ++ ++ if (task_rq(p) == rq && task_on_rq_queued(p)) { ++ cpu = select_fallback_rq(rq->cpu, p); ++ rq = __migrate_task(rq, p, cpu); ++ } ++ ++ rq_unlock(rq, &rf); ++ raw_spin_unlock_irq(&p->pi_lock); ++ ++ put_task_struct(p); ++ ++ return 0; ++} ++ ++static DEFINE_PER_CPU(struct cpu_stop_work, push_work); ++ ++/* ++ * Ensure we only run per-cpu kthreads once the CPU goes !active. 
++ */ ++static void balance_push(struct rq *rq) ++{ ++ struct task_struct *push_task = rq->curr; ++ ++ lockdep_assert_held(&rq->lock); ++ SCHED_WARN_ON(rq->cpu != smp_processor_id()); ++ /* ++ * Ensure the thing is persistent until balance_push_set(.on = false); ++ */ ++ rq->balance_callback = &balance_push_callback; ++ ++ /* ++ * Both the cpu-hotplug and stop task are in this case and are ++ * required to complete the hotplug process. ++ * ++ * XXX: the idle task does not match kthread_is_per_cpu() due to ++ * histerical raisins. ++ */ ++ if (rq->idle == push_task || ++ ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) || ++ is_migration_disabled(push_task)) { ++ ++ /* ++ * If this is the idle task on the outgoing CPU try to wake ++ * up the hotplug control thread which might wait for the ++ * last task to vanish. The rcuwait_active() check is ++ * accurate here because the waiter is pinned on this CPU ++ * and can't obviously be running in parallel. ++ * ++ * On RT kernels this also has to check whether there are ++ * pinned and scheduled out tasks on the runqueue. They ++ * need to leave the migrate disabled section first. ++ */ ++ if (!rq->nr_running && !rq_has_pinned_tasks(rq) && ++ rcuwait_active(&rq->hotplug_wait)) { ++ raw_spin_unlock(&rq->lock); ++ rcuwait_wake_up(&rq->hotplug_wait); ++ raw_spin_lock(&rq->lock); ++ } ++ return; ++ } ++ ++ get_task_struct(push_task); ++ /* ++ * Temporarily drop rq->lock such that we can wake-up the stop task. ++ * Both preemption and IRQs are still disabled. ++ */ ++ raw_spin_unlock(&rq->lock); ++ stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, ++ this_cpu_ptr(&push_work)); ++ /* ++ * At this point need_resched() is true and we'll take the loop in ++ * schedule(). The next pick is obviously going to be the stop task ++ * which kthread_is_per_cpu() and will push this task away. ++ */ ++ raw_spin_lock(&rq->lock); ++} ++ ++static void balance_push_set(int cpu, bool on) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ struct rq_flags rf; ++ ++ rq_lock_irqsave(rq, &rf); ++ rq->balance_push = on; ++ if (on) { ++ WARN_ON_ONCE(rq->balance_callback); ++ rq->balance_callback = &balance_push_callback; ++ } else if (rq->balance_callback == &balance_push_callback) { ++ rq->balance_callback = NULL; ++ } ++ rq_unlock_irqrestore(rq, &rf); ++} ++ ++/* ++ * Invoked from a CPUs hotplug control thread after the CPU has been marked ++ * inactive. All tasks which are not per CPU kernel threads are either ++ * pushed off this CPU now via balance_push() or placed on a different CPU ++ * during wakeup. Wait until the CPU is quiescent. ++ */ ++static void balance_hotplug_wait(void) ++{ ++ struct rq *rq = this_rq(); ++ ++ rcuwait_wait_event(&rq->hotplug_wait, ++ rq->nr_running == 1 && !rq_has_pinned_tasks(rq), ++ TASK_UNINTERRUPTIBLE); ++} ++ ++#else ++ ++static void balance_push(struct rq *rq) ++{ ++} ++ ++static void balance_push_set(int cpu, bool on) ++{ ++} ++ ++static inline void balance_hotplug_wait(void) ++{ ++} ++#endif /* CONFIG_HOTPLUG_CPU */ ++ ++static void set_rq_offline(struct rq *rq) ++{ ++ if (rq->online) ++ rq->online = false; ++} ++ ++static void set_rq_online(struct rq *rq) ++{ ++ if (!rq->online) ++ rq->online = true; ++} ++ ++/* ++ * used to mark begin/end of suspend/resume: ++ */ ++static int num_cpus_frozen; ++ ++/* ++ * Update cpusets according to cpu_active mask. If cpusets are ++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper ++ * around partition_sched_domains(). 
++ * ++ * If we come here as part of a suspend/resume, don't touch cpusets because we ++ * want to restore it back to its original state upon resume anyway. ++ */ ++static void cpuset_cpu_active(void) ++{ ++ if (cpuhp_tasks_frozen) { ++ /* ++ * num_cpus_frozen tracks how many CPUs are involved in suspend ++ * resume sequence. As long as this is not the last online ++ * operation in the resume sequence, just build a single sched ++ * domain, ignoring cpusets. ++ */ ++ partition_sched_domains(1, NULL, NULL); ++ if (--num_cpus_frozen) ++ return; ++ /* ++ * This is the last CPU online operation. So fall through and ++ * restore the original sched domains by considering the ++ * cpuset configurations. ++ */ ++ cpuset_force_rebuild(); ++ } ++ ++ cpuset_update_active_cpus(); ++} ++ ++static int cpuset_cpu_inactive(unsigned int cpu) ++{ ++ if (!cpuhp_tasks_frozen) { ++ cpuset_update_active_cpus(); ++ } else { ++ num_cpus_frozen++; ++ partition_sched_domains(1, NULL, NULL); ++ } ++ return 0; ++} ++ ++int sched_cpu_activate(unsigned int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ /* ++ * Make sure that when the hotplug state machine does a roll-back ++ * we clear balance_push. Ideally that would happen earlier... ++ */ ++ balance_push_set(cpu, false); ++ ++#ifdef CONFIG_SCHED_SMT ++ /* ++ * When going up, increment the number of cores with SMT present. ++ */ ++ if (cpumask_weight(cpu_smt_mask(cpu)) == 2) ++ static_branch_inc_cpuslocked(&sched_smt_present); ++#endif ++ set_cpu_active(cpu, true); ++ ++ if (sched_smp_initialized) ++ cpuset_cpu_active(); ++ ++ /* ++ * Put the rq online, if not already. This happens: ++ * ++ * 1) In the early boot process, because we build the real domains ++ * after all cpus have been brought up. ++ * ++ * 2) At runtime, if cpuset_cpu_active() fails to rebuild the ++ * domains. ++ */ ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ set_rq_online(rq); ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ ++ return 0; ++} ++ ++int sched_cpu_deactivate(unsigned int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ int ret; ++ ++ set_cpu_active(cpu, false); ++ ++ /* ++ * From this point forward, this CPU will refuse to run any task that ++ * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively ++ * push those tasks away until this gets cleared, see ++ * sched_cpu_dying(). ++ */ ++ balance_push_set(cpu, true); ++ ++ /* ++ * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU ++ * users of this state to go away such that all new such users will ++ * observe it. ++ * ++ * Specifically, we rely on ttwu to no longer target this CPU, see ++ * ttwu_queue_cond() and is_cpu_allowed(). ++ * ++ * Do sync before park smpboot threads to take care the rcu boost case. ++ */ ++ synchronize_rcu(); ++ ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ update_rq_clock(rq); ++ set_rq_offline(rq); ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ ++#ifdef CONFIG_SCHED_SMT ++ /* ++ * When going down, decrement the number of cores with SMT present. 
++ */ ++ if (cpumask_weight(cpu_smt_mask(cpu)) == 2) { ++ static_branch_dec_cpuslocked(&sched_smt_present); ++ if (!static_branch_likely(&sched_smt_present)) ++ cpumask_clear(&sched_sg_idle_mask); ++ } ++#endif ++ ++ if (!sched_smp_initialized) ++ return 0; ++ ++ ret = cpuset_cpu_inactive(cpu); ++ if (ret) { ++ balance_push_set(cpu, false); ++ set_cpu_active(cpu, true); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static void sched_rq_cpu_starting(unsigned int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++ rq->calc_load_update = calc_load_update; ++} ++ ++int sched_cpu_starting(unsigned int cpu) ++{ ++ sched_rq_cpu_starting(cpu); ++ sched_tick_start(cpu); ++ return 0; ++} ++ ++#ifdef CONFIG_HOTPLUG_CPU ++ ++/* ++ * Invoked immediately before the stopper thread is invoked to bring the ++ * CPU down completely. At this point all per CPU kthreads except the ++ * hotplug thread (current) and the stopper thread (inactive) have been ++ * either parked or have been unbound from the outgoing CPU. Ensure that ++ * any of those which might be on the way out are gone. ++ * ++ * If after this point a bound task is being woken on this CPU then the ++ * responsible hotplug callback has failed to do it's job. ++ * sched_cpu_dying() will catch it with the appropriate fireworks. ++ */ ++int sched_cpu_wait_empty(unsigned int cpu) ++{ ++ balance_hotplug_wait(); ++ return 0; ++} ++ ++/* ++ * Since this CPU is going 'away' for a while, fold any nr_active delta we ++ * might have. Called from the CPU stopper task after ensuring that the ++ * stopper is the last running task on the CPU, so nr_active count is ++ * stable. We need to take the teardown thread which is calling this into ++ * account, so we hand in adjust = 1 to the load calculation. ++ * ++ * Also see the comment "Global load-average calculations". ++ */ ++static void calc_load_migrate(struct rq *rq) ++{ ++ long delta = calc_load_fold_active(rq, 1); ++ ++ if (delta) ++ atomic_long_add(delta, &calc_load_tasks); ++} ++ ++static void dump_rq_tasks(struct rq *rq, const char *loglvl) ++{ ++ struct task_struct *g, *p; ++ int cpu = cpu_of(rq); ++ ++ lockdep_assert_held(&rq->lock); ++ ++ printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running); ++ for_each_process_thread(g, p) { ++ if (task_cpu(p) != cpu) ++ continue; ++ ++ if (!task_on_rq_queued(p)) ++ continue; ++ ++ printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm); ++ } ++} ++ ++int sched_cpu_dying(unsigned int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ /* Handle pending wakeups and then migrate everything off */ ++ sched_tick_stop(cpu); ++ ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) { ++ WARN(true, "Dying CPU not properly vacated!"); ++ dump_rq_tasks(rq, KERN_WARNING); ++ } ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ ++ /* ++ * Now that the CPU is offline, make sure we're welcome ++ * to new tasks once we come back up. 
++ */ ++ balance_push_set(cpu, false); ++ ++ calc_load_migrate(rq); ++ hrtick_clear(rq); ++ return 0; ++} ++#endif ++ ++#ifdef CONFIG_SMP ++static void sched_init_topology_cpumask_early(void) ++{ ++ int cpu; ++ cpumask_t *tmp; ++ ++ for_each_possible_cpu(cpu) { ++ /* init affinity masks */ ++ tmp = per_cpu(sched_cpu_affinity_masks, cpu); ++ ++ cpumask_copy(tmp, cpumask_of(cpu)); ++ tmp++; ++ cpumask_copy(tmp, cpu_possible_mask); ++ cpumask_clear_cpu(cpu, tmp); ++ per_cpu(sched_cpu_affinity_end_mask, cpu) = ++tmp; ++ /* init topo masks */ ++ tmp = per_cpu(sched_cpu_topo_masks, cpu); ++ ++ cpumask_copy(tmp, cpumask_of(cpu)); ++ tmp++; ++ cpumask_copy(tmp, cpu_possible_mask); ++ per_cpu(sched_cpu_llc_mask, cpu) = tmp; ++ /*per_cpu(sd_llc_id, cpu) = cpu;*/ ++ } ++} ++ ++#define TOPOLOGY_CPUMASK(name, mask, last) \ ++ if (cpumask_and(chk, chk, mask)) { \ ++ cpumask_copy(topo, mask); \ ++ printk(KERN_INFO "sched: cpu#%02d affinity: 0x%08lx topo: 0x%08lx - "#name,\ ++ cpu, (chk++)->bits[0], (topo++)->bits[0]); \ ++ } \ ++ if (!last) \ ++ cpumask_complement(chk, mask) ++ ++static void sched_init_topology_cpumask(void) ++{ ++ int cpu; ++ cpumask_t *chk, *topo; ++ ++ for_each_online_cpu(cpu) { ++ /* take chance to reset time slice for idle tasks */ ++ cpu_rq(cpu)->idle->time_slice = sched_timeslice_ns; ++ ++ chk = per_cpu(sched_cpu_affinity_masks, cpu) + 1; ++ topo = per_cpu(sched_cpu_topo_masks, cpu) + 1; ++ ++ cpumask_complement(chk, cpumask_of(cpu)); ++#ifdef CONFIG_SCHED_SMT ++ TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false); ++#endif ++ per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu)); ++ per_cpu(sched_cpu_llc_mask, cpu) = topo; ++ TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false); ++ ++ TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false); ++ ++ TOPOLOGY_CPUMASK(others, cpu_online_mask, true); ++ ++ per_cpu(sched_cpu_affinity_end_mask, cpu) = chk; ++ printk(KERN_INFO "sched: cpu#%02d llc_id = %d, llc_mask idx = %d\n", ++ cpu, per_cpu(sd_llc_id, cpu), ++ (int) (per_cpu(sched_cpu_llc_mask, cpu) - ++ per_cpu(sched_cpu_topo_masks, cpu))); ++ } ++} ++#endif ++ ++void __init sched_init_smp(void) ++{ ++ /* Move init over to a non-isolated CPU */ ++ if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) ++ BUG(); ++ ++ sched_init_topology_cpumask(); ++ ++ sched_smp_initialized = true; ++} ++#else ++void __init sched_init_smp(void) ++{ ++ cpu_rq(0)->idle->time_slice = sched_timeslice_ns; ++} ++#endif /* CONFIG_SMP */ ++ ++int in_sched_functions(unsigned long addr) ++{ ++ return in_lock_functions(addr) || ++ (addr >= (unsigned long)__sched_text_start ++ && addr < (unsigned long)__sched_text_end); ++} ++ ++#ifdef CONFIG_CGROUP_SCHED ++/* task group related information */ ++struct task_group { ++ struct cgroup_subsys_state css; ++ ++ struct rcu_head rcu; ++ struct list_head list; ++ ++ struct task_group *parent; ++ struct list_head siblings; ++ struct list_head children; ++#ifdef CONFIG_FAIR_GROUP_SCHED ++ unsigned long shares; ++#endif ++}; ++ ++/* ++ * Default task group. ++ * Every task in system belongs to this group at bootup. 
++ */ ++struct task_group root_task_group; ++LIST_HEAD(task_groups); ++ ++/* Cacheline aligned slab cache for task_group */ ++static struct kmem_cache *task_group_cache __read_mostly; ++#endif /* CONFIG_CGROUP_SCHED */ ++ ++void __init sched_init(void) ++{ ++ int i; ++ struct rq *rq; ++ ++ printk(KERN_INFO ALT_SCHED_VERSION_MSG); ++ ++ wait_bit_init(); ++ ++#ifdef CONFIG_SMP ++ for (i = 0; i < SCHED_BITS; i++) ++ cpumask_copy(&sched_rq_watermark[i], cpu_present_mask); ++#endif ++ ++#ifdef CONFIG_CGROUP_SCHED ++ task_group_cache = KMEM_CACHE(task_group, 0); ++ ++ list_add(&root_task_group.list, &task_groups); ++ INIT_LIST_HEAD(&root_task_group.children); ++ INIT_LIST_HEAD(&root_task_group.siblings); ++#endif /* CONFIG_CGROUP_SCHED */ ++ for_each_possible_cpu(i) { ++ rq = cpu_rq(i); ++ ++ sched_queue_init(rq); ++ rq->watermark = IDLE_WM; ++ rq->skip = NULL; ++ ++ raw_spin_lock_init(&rq->lock); ++ rq->nr_running = rq->nr_uninterruptible = 0; ++ rq->calc_load_active = 0; ++ rq->calc_load_update = jiffies + LOAD_FREQ; ++#ifdef CONFIG_SMP ++ rq->online = false; ++ rq->cpu = i; ++ ++#ifdef CONFIG_SCHED_SMT ++ rq->active_balance = 0; ++#endif ++ ++#ifdef CONFIG_NO_HZ_COMMON ++ INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); ++#endif ++ rq->balance_callback = NULL; ++#ifdef CONFIG_HOTPLUG_CPU ++ rcuwait_init(&rq->hotplug_wait); ++#endif ++#endif /* CONFIG_SMP */ ++ rq->nr_switches = 0; ++ ++ hrtick_rq_init(rq); ++ atomic_set(&rq->nr_iowait, 0); ++ } ++#ifdef CONFIG_SMP ++ /* Set rq->online for cpu 0 */ ++ cpu_rq(0)->online = true; ++#endif ++ /* ++ * The boot idle thread does lazy MMU switching as well: ++ */ ++ mmgrab(&init_mm); ++ enter_lazy_tlb(&init_mm, current); ++ ++ /* ++ * Make us the idle thread. Technically, schedule() should not be ++ * called from this thread, however somewhere below it might be, ++ * but because we are the idle thread, we just pick up running again ++ * when this runqueue becomes "idle". ++ */ ++ init_idle(current, smp_processor_id()); ++ ++ calc_load_update = jiffies + LOAD_FREQ; ++ ++#ifdef CONFIG_SMP ++ idle_thread_set_boot_cpu(); ++ ++ sched_init_topology_cpumask_early(); ++#endif /* SMP */ ++ ++ init_schedstats(); ++ ++ psi_init(); ++} ++ ++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP ++static inline int preempt_count_equals(int preempt_offset) ++{ ++ int nested = preempt_count() + rcu_preempt_depth(); ++ ++ return (nested == preempt_offset); ++} ++ ++void __might_sleep(const char *file, int line, int preempt_offset) ++{ ++ /* ++ * Blocking primitives will set (and therefore destroy) current->state, ++ * since we will exit with TASK_RUNNING make sure we enter with it, ++ * otherwise we will destroy state. 
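++ *
++ * The pattern this guards against looks roughly like (illustrative
++ * only, some_mutex is a placeholder):
++ *
++ *	set_current_state(TASK_UNINTERRUPTIBLE);
++ *	mutex_lock(&some_mutex);	<- may sleep, leaves us TASK_RUNNING
++ *	schedule();			<- no longer sleeps as intended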
++ */ ++ WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, ++ "do not call blocking ops when !TASK_RUNNING; " ++ "state=%lx set at [<%p>] %pS\n", ++ current->state, ++ (void *)current->task_state_change, ++ (void *)current->task_state_change); ++ ++ ___might_sleep(file, line, preempt_offset); ++} ++EXPORT_SYMBOL(__might_sleep); ++ ++void ___might_sleep(const char *file, int line, int preempt_offset) ++{ ++ /* Ratelimiting timestamp: */ ++ static unsigned long prev_jiffy; ++ ++ unsigned long preempt_disable_ip; ++ ++ /* WARN_ON_ONCE() by default, no rate limit required: */ ++ rcu_sleep_check(); ++ ++ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && ++ !is_idle_task(current) && !current->non_block_count) || ++ system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || ++ oops_in_progress) ++ return; ++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) ++ return; ++ prev_jiffy = jiffies; ++ ++ /* Save this before calling printk(), since that will clobber it: */ ++ preempt_disable_ip = get_preempt_disable_ip(current); ++ ++ printk(KERN_ERR ++ "BUG: sleeping function called from invalid context at %s:%d\n", ++ file, line); ++ printk(KERN_ERR ++ "in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n", ++ in_atomic(), irqs_disabled(), current->non_block_count, ++ current->pid, current->comm); ++ ++ if (task_stack_end_corrupted(current)) ++ printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); ++ ++ debug_show_held_locks(current); ++ if (irqs_disabled()) ++ print_irqtrace_events(current); ++#ifdef CONFIG_DEBUG_PREEMPT ++ if (!preempt_count_equals(preempt_offset)) { ++ pr_err("Preemption disabled at:"); ++ print_ip_sym(KERN_ERR, preempt_disable_ip); ++ } ++#endif ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++} ++EXPORT_SYMBOL(___might_sleep); ++ ++void __cant_sleep(const char *file, int line, int preempt_offset) ++{ ++ static unsigned long prev_jiffy; ++ ++ if (irqs_disabled()) ++ return; ++ ++ if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) ++ return; ++ ++ if (preempt_count() > preempt_offset) ++ return; ++ ++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) ++ return; ++ prev_jiffy = jiffies; ++ ++ printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); ++ printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", ++ in_atomic(), irqs_disabled(), ++ current->pid, current->comm); ++ ++ debug_show_held_locks(current); ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++} ++EXPORT_SYMBOL_GPL(__cant_sleep); ++ ++#ifdef CONFIG_SMP ++void __cant_migrate(const char *file, int line) ++{ ++ static unsigned long prev_jiffy; ++ ++ if (irqs_disabled()) ++ return; ++ ++ if (is_migration_disabled(current)) ++ return; ++ ++ if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) ++ return; ++ ++ if (preempt_count() > 0) ++ return; ++ ++ if (current->migration_flags & MDF_FORCE_ENABLED) ++ return; ++ ++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) ++ return; ++ prev_jiffy = jiffies; ++ ++ pr_err("BUG: assuming non migratable context at %s:%d\n", file, line); ++ pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n", ++ in_atomic(), irqs_disabled(), is_migration_disabled(current), ++ current->pid, current->comm); ++ ++ debug_show_held_locks(current); ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++} ++EXPORT_SYMBOL_GPL(__cant_migrate); ++#endif ++#endif ++ ++#ifdef CONFIG_MAGIC_SYSRQ ++void normalize_rt_tasks(void) ++{ ++ 
struct task_struct *g, *p; ++ struct sched_attr attr = { ++ .sched_policy = SCHED_NORMAL, ++ }; ++ ++ read_lock(&tasklist_lock); ++ for_each_process_thread(g, p) { ++ /* ++ * Only normalize user tasks: ++ */ ++ if (p->flags & PF_KTHREAD) ++ continue; ++ ++ if (!rt_task(p)) { ++ /* ++ * Renice negative nice level userspace ++ * tasks back to 0: ++ */ ++ if (task_nice(p) < 0) ++ set_user_nice(p, 0); ++ continue; ++ } ++ ++ __sched_setscheduler(p, &attr, false, false); ++ } ++ read_unlock(&tasklist_lock); ++} ++#endif /* CONFIG_MAGIC_SYSRQ */ ++ ++#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) ++/* ++ * These functions are only useful for the IA64 MCA handling, or kdb. ++ * ++ * They can only be called when the whole system has been ++ * stopped - every CPU needs to be quiescent, and no scheduling ++ * activity can take place. Using them for anything else would ++ * be a serious bug, and as a result, they aren't even visible ++ * under any other configuration. ++ */ ++ ++/** ++ * curr_task - return the current task for a given CPU. ++ * @cpu: the processor in question. ++ * ++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! ++ * ++ * Return: The current task for @cpu. ++ */ ++struct task_struct *curr_task(int cpu) ++{ ++ return cpu_curr(cpu); ++} ++ ++#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ ++ ++#ifdef CONFIG_IA64 ++/** ++ * ia64_set_curr_task - set the current task for a given CPU. ++ * @cpu: the processor in question. ++ * @p: the task pointer to set. ++ * ++ * Description: This function must only be used when non-maskable interrupts ++ * are serviced on a separate stack. It allows the architecture to switch the ++ * notion of the current task on a CPU in a non-blocking manner. This function ++ * must be called with all CPU's synchronised, and interrupts disabled, the ++ * and caller must save the original value of the current task (see ++ * curr_task() above) and restore that value before reenabling interrupts and ++ * re-starting the system. ++ * ++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! ++ */ ++void ia64_set_curr_task(int cpu, struct task_struct *p) ++{ ++ cpu_curr(cpu) = p; ++} ++ ++#endif ++ ++#ifdef CONFIG_CGROUP_SCHED ++static void sched_free_group(struct task_group *tg) ++{ ++ kmem_cache_free(task_group_cache, tg); ++} ++ ++/* allocate runqueue etc for a new task group */ ++struct task_group *sched_create_group(struct task_group *parent) ++{ ++ struct task_group *tg; ++ ++ tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); ++ if (!tg) ++ return ERR_PTR(-ENOMEM); ++ ++ return tg; ++} ++ ++void sched_online_group(struct task_group *tg, struct task_group *parent) ++{ ++} ++ ++/* rcu callback to free various structures associated with a task group */ ++static void sched_free_group_rcu(struct rcu_head *rhp) ++{ ++ /* Now it should be safe to free those cfs_rqs */ ++ sched_free_group(container_of(rhp, struct task_group, rcu)); ++} ++ ++void sched_destroy_group(struct task_group *tg) ++{ ++ /* Wait for possible concurrent references to cfs_rqs complete */ ++ call_rcu(&tg->rcu, sched_free_group_rcu); ++} ++ ++void sched_offline_group(struct task_group *tg) ++{ ++} ++ ++static inline struct task_group *css_tg(struct cgroup_subsys_state *css) ++{ ++ return css ? 
container_of(css, struct task_group, css) : NULL; ++} ++ ++static struct cgroup_subsys_state * ++cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) ++{ ++ struct task_group *parent = css_tg(parent_css); ++ struct task_group *tg; ++ ++ if (!parent) { ++ /* This is early initialization for the top cgroup */ ++ return &root_task_group.css; ++ } ++ ++ tg = sched_create_group(parent); ++ if (IS_ERR(tg)) ++ return ERR_PTR(-ENOMEM); ++ return &tg->css; ++} ++ ++/* Expose task group only after completing cgroup initialization */ ++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) ++{ ++ struct task_group *tg = css_tg(css); ++ struct task_group *parent = css_tg(css->parent); ++ ++ if (parent) ++ sched_online_group(tg, parent); ++ return 0; ++} ++ ++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) ++{ ++ struct task_group *tg = css_tg(css); ++ ++ sched_offline_group(tg); ++} ++ ++static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) ++{ ++ struct task_group *tg = css_tg(css); ++ ++ /* ++ * Relies on the RCU grace period between css_released() and this. ++ */ ++ sched_free_group(tg); ++} ++ ++static void cpu_cgroup_fork(struct task_struct *task) ++{ ++} ++ ++static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) ++{ ++ return 0; ++} ++ ++static void cpu_cgroup_attach(struct cgroup_taskset *tset) ++{ ++} ++ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++static DEFINE_MUTEX(shares_mutex); ++ ++int sched_group_set_shares(struct task_group *tg, unsigned long shares) ++{ ++ /* ++ * We can't change the weight of the root cgroup. ++ */ ++ if (&root_task_group == tg) ++ return -EINVAL; ++ ++ shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES)); ++ ++ mutex_lock(&shares_mutex); ++ if (tg->shares == shares) ++ goto done; ++ ++ tg->shares = shares; ++done: ++ mutex_unlock(&shares_mutex); ++ return 0; ++} ++ ++static int cpu_shares_write_u64(struct cgroup_subsys_state *css, ++ struct cftype *cftype, u64 shareval) ++{ ++ if (shareval > scale_load_down(ULONG_MAX)) ++ shareval = MAX_SHARES; ++ return sched_group_set_shares(css_tg(css), scale_load(shareval)); ++} ++ ++static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, ++ struct cftype *cft) ++{ ++ struct task_group *tg = css_tg(css); ++ ++ return (u64) scale_load_down(tg->shares); ++} ++#endif ++ ++static struct cftype cpu_legacy_files[] = { ++#ifdef CONFIG_FAIR_GROUP_SCHED ++ { ++ .name = "shares", ++ .read_u64 = cpu_shares_read_u64, ++ .write_u64 = cpu_shares_write_u64, ++ }, ++#endif ++ { } /* Terminate */ ++}; ++ ++ ++static struct cftype cpu_files[] = { ++ { } /* terminate */ ++}; ++ ++static int cpu_extra_stat_show(struct seq_file *sf, ++ struct cgroup_subsys_state *css) ++{ ++ return 0; ++} ++ ++struct cgroup_subsys cpu_cgrp_subsys = { ++ .css_alloc = cpu_cgroup_css_alloc, ++ .css_online = cpu_cgroup_css_online, ++ .css_released = cpu_cgroup_css_released, ++ .css_free = cpu_cgroup_css_free, ++ .css_extra_stat_show = cpu_extra_stat_show, ++ .fork = cpu_cgroup_fork, ++ .can_attach = cpu_cgroup_can_attach, ++ .attach = cpu_cgroup_attach, ++ .legacy_cftypes = cpu_files, ++ .legacy_cftypes = cpu_legacy_files, ++ .dfl_cftypes = cpu_files, ++ .early_init = true, ++ .threaded = true, ++}; ++#endif /* CONFIG_CGROUP_SCHED */ ++ ++#undef CREATE_TRACE_POINTS +diff --git a/kernel/sched/alt_debug.c b/kernel/sched/alt_debug.c +new file mode 100644 +index 000000000000..1212a031700e +--- /dev/null ++++ b/kernel/sched/alt_debug.c +@@ -0,0 +1,31 @@ ++/* ++ * kernel/sched/alt_debug.c ++ * ++ * 
Print the alt scheduler debugging details ++ * ++ * Author: Alfred Chen ++ * Date : 2020 ++ */ ++#include "sched.h" ++ ++/* ++ * This allows printing both to /proc/sched_debug and ++ * to the console ++ */ ++#define SEQ_printf(m, x...) \ ++ do { \ ++ if (m) \ ++ seq_printf(m, x); \ ++ else \ ++ pr_cont(x); \ ++ } while (0) ++ ++void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, ++ struct seq_file *m) ++{ ++ SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns), ++ get_nr_threads(p)); ++} ++ ++void proc_sched_set_task(struct task_struct *p) ++{} +diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h +new file mode 100644 +index 000000000000..2a6a0530fbb7 +--- /dev/null ++++ b/kernel/sched/alt_sched.h +@@ -0,0 +1,694 @@ ++#ifndef ALT_SCHED_H ++#define ALT_SCHED_H ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#ifdef CONFIG_PARAVIRT ++# include ++#endif ++ ++#include "cpupri.h" ++ ++#include ++ ++#ifdef CONFIG_SCHED_BMQ ++#include "bmq.h" ++#endif ++#ifdef CONFIG_SCHED_PDS ++#include "pds.h" ++#endif ++ ++#ifdef CONFIG_SCHED_DEBUG ++# define SCHED_WARN_ON(x) WARN_ONCE(x, #x) ++#else ++# define SCHED_WARN_ON(x) ({ (void)(x), 0; }) ++#endif ++ ++/* ++ * Increase resolution of nice-level calculations for 64-bit architectures. ++ * The extra resolution improves shares distribution and load balancing of ++ * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup ++ * hierarchies, especially on larger systems. This is not a user-visible change ++ * and does not change the user-interface for setting shares/weights. ++ * ++ * We increase resolution only if we have enough bits to allow this increased ++ * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit ++ * are pretty high and the returns do not justify the increased costs. ++ * ++ * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to ++ * increase coverage and consistency always enable it on 64-bit platforms. ++ */ ++#ifdef CONFIG_64BIT ++# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT) ++# define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT) ++# define scale_load_down(w) \ ++({ \ ++ unsigned long __w = (w); \ ++ if (__w) \ ++ __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \ ++ __w; \ ++}) ++#else ++# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT) ++# define scale_load(w) (w) ++# define scale_load_down(w) (w) ++#endif ++ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++#define ROOT_TASK_GROUP_LOAD NICE_0_LOAD ++ ++/* ++ * A weight of 0 or 1 can cause arithmetics problems. ++ * A weight of a cfs_rq is the sum of weights of which entities ++ * are queued on this cfs_rq, so a weight of a entity should not be ++ * too large, so as the shares value of a task group. ++ * (The default weight is 1024 - so there's no practical ++ * limitation from this.) 
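++ *
++ * Worked example (assuming the usual SCHED_FIXEDPOINT_SHIFT of 10):
++ * on 64-bit, scale_load(1024) == 1024 << 10 == 1048576, while
++ * scale_load_down() shifts it back and clamps non-zero weights to a
++ * floor of 2; on 32-bit both macros are identity operations.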
++ */ ++#define MIN_SHARES (1UL << 1) ++#define MAX_SHARES (1UL << 18) ++#endif ++ ++/* task_struct::on_rq states: */ ++#define TASK_ON_RQ_QUEUED 1 ++#define TASK_ON_RQ_MIGRATING 2 ++ ++static inline int task_on_rq_queued(struct task_struct *p) ++{ ++ return p->on_rq == TASK_ON_RQ_QUEUED; ++} ++ ++static inline int task_on_rq_migrating(struct task_struct *p) ++{ ++ return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; ++} ++ ++/* ++ * wake flags ++ */ ++#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ ++#define WF_FORK 0x02 /* child wakeup after fork */ ++#define WF_MIGRATED 0x04 /* internal use, task got migrated */ ++#define WF_ON_CPU 0x08 /* Wakee is on_rq */ ++ ++/* ++ * This is the main, per-CPU runqueue data structure. ++ * This data should only be modified by the local cpu. ++ */ ++struct rq { ++ /* runqueue lock: */ ++ raw_spinlock_t lock; ++ ++ struct task_struct __rcu *curr; ++ struct task_struct *idle, *stop, *skip; ++ struct mm_struct *prev_mm; ++ ++#ifdef CONFIG_SCHED_BMQ ++ struct bmq queue; ++#endif ++#ifdef CONFIG_SCHED_PDS ++ struct skiplist_node sl_header; ++#endif ++ unsigned long watermark; ++ ++ /* switch count */ ++ u64 nr_switches; ++ ++ atomic_t nr_iowait; ++ ++#ifdef CONFIG_MEMBARRIER ++ int membarrier_state; ++#endif ++ ++#ifdef CONFIG_SMP ++ int cpu; /* cpu of this runqueue */ ++ bool online; ++ ++ unsigned int ttwu_pending; ++ unsigned char nohz_idle_balance; ++ unsigned char idle_balance; ++ ++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ ++ struct sched_avg avg_irq; ++#endif ++ ++#ifdef CONFIG_SCHED_SMT ++ int active_balance; ++ struct cpu_stop_work active_balance_work; ++#endif ++ struct callback_head *balance_callback; ++ unsigned char balance_push; ++#ifdef CONFIG_HOTPLUG_CPU ++ struct rcuwait hotplug_wait; ++#endif ++ unsigned int nr_pinned; ++#endif /* CONFIG_SMP */ ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++ u64 prev_irq_time; ++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ ++#ifdef CONFIG_PARAVIRT ++ u64 prev_steal_time; ++#endif /* CONFIG_PARAVIRT */ ++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING ++ u64 prev_steal_time_rq; ++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */ ++ ++ /* calc_load related fields */ ++ unsigned long calc_load_update; ++ long calc_load_active; ++ ++ u64 clock, last_tick; ++ u64 last_ts_switch; ++ u64 clock_task; ++ ++ unsigned int nr_running; ++ unsigned long nr_uninterruptible; ++ ++#ifdef CONFIG_SCHED_HRTICK ++#ifdef CONFIG_SMP ++ call_single_data_t hrtick_csd; ++#endif ++ struct hrtimer hrtick_timer; ++ ktime_t hrtick_time; ++#endif ++ ++#ifdef CONFIG_SCHEDSTATS ++ ++ /* latency stats */ ++ struct sched_info rq_sched_info; ++ unsigned long long rq_cpu_time; ++ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? 
*/ ++ ++ /* sys_sched_yield() stats */ ++ unsigned int yld_count; ++ ++ /* schedule() stats */ ++ unsigned int sched_switch; ++ unsigned int sched_count; ++ unsigned int sched_goidle; ++ ++ /* try_to_wake_up() stats */ ++ unsigned int ttwu_count; ++ unsigned int ttwu_local; ++#endif /* CONFIG_SCHEDSTATS */ ++ ++#ifdef CONFIG_CPU_IDLE ++ /* Must be inspected within a rcu lock section */ ++ struct cpuidle_state *idle_state; ++#endif ++ ++#ifdef CONFIG_NO_HZ_COMMON ++#ifdef CONFIG_SMP ++ call_single_data_t nohz_csd; ++#endif ++ atomic_t nohz_flags; ++#endif /* CONFIG_NO_HZ_COMMON */ ++}; ++ ++extern unsigned long calc_load_update; ++extern atomic_long_t calc_load_tasks; ++ ++extern void calc_global_load_tick(struct rq *this_rq); ++extern long calc_load_fold_active(struct rq *this_rq, long adjust); ++ ++DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); ++#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) ++#define this_rq() this_cpu_ptr(&runqueues) ++#define task_rq(p) cpu_rq(task_cpu(p)) ++#define cpu_curr(cpu) (cpu_rq(cpu)->curr) ++#define raw_rq() raw_cpu_ptr(&runqueues) ++ ++#ifdef CONFIG_SMP ++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) ++void register_sched_domain_sysctl(void); ++void unregister_sched_domain_sysctl(void); ++#else ++static inline void register_sched_domain_sysctl(void) ++{ ++} ++static inline void unregister_sched_domain_sysctl(void) ++{ ++} ++#endif ++ ++extern bool sched_smp_initialized; ++ ++enum { ++ ITSELF_LEVEL_SPACE_HOLDER, ++#ifdef CONFIG_SCHED_SMT ++ SMT_LEVEL_SPACE_HOLDER, ++#endif ++ COREGROUP_LEVEL_SPACE_HOLDER, ++ CORE_LEVEL_SPACE_HOLDER, ++ OTHER_LEVEL_SPACE_HOLDER, ++ NR_CPU_AFFINITY_LEVELS ++}; ++ ++DECLARE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks); ++DECLARE_PER_CPU(cpumask_t *, sched_cpu_llc_mask); ++ ++static inline int __best_mask_cpu(int cpu, const cpumask_t *cpumask, ++ const cpumask_t *mask) ++{ ++#if NR_CPUS <= 64 ++ unsigned long t; ++ ++ while ((t = cpumask->bits[0] & mask->bits[0]) == 0UL) ++ mask++; ++ ++ return __ffs(t); ++#else ++ while ((cpu = cpumask_any_and(cpumask, mask)) >= nr_cpu_ids) ++ mask++; ++ return cpu; ++#endif ++} ++ ++static inline int best_mask_cpu(int cpu, const cpumask_t *mask) ++{ ++#if NR_CPUS <= 64 ++ unsigned long llc_match; ++ cpumask_t *chk = per_cpu(sched_cpu_llc_mask, cpu); ++ ++ if ((llc_match = mask->bits[0] & chk->bits[0])) { ++ unsigned long match; ++ ++ chk = per_cpu(sched_cpu_topo_masks, cpu); ++ if (mask->bits[0] & chk->bits[0]) ++ return cpu; ++ ++#ifdef CONFIG_SCHED_SMT ++ chk++; ++ if ((match = mask->bits[0] & chk->bits[0])) ++ return __ffs(match); ++#endif ++ ++ return __ffs(llc_match); ++ } ++ ++ return __best_mask_cpu(cpu, mask, chk + 1); ++#else ++ cpumask_t llc_match; ++ cpumask_t *chk = per_cpu(sched_cpu_llc_mask, cpu); ++ ++ if (cpumask_and(&llc_match, mask, chk)) { ++ cpumask_t tmp; ++ ++ chk = per_cpu(sched_cpu_topo_masks, cpu); ++ if (cpumask_test_cpu(cpu, mask)) ++ return cpu; ++ ++#ifdef CONFIG_SCHED_SMT ++ chk++; ++ if (cpumask_and(&tmp, mask, chk)) ++ return cpumask_any(&tmp); ++#endif ++ ++ return cpumask_any(&llc_match); ++ } ++ ++ return __best_mask_cpu(cpu, mask, chk + 1); ++#endif ++} ++ ++extern void flush_smp_call_function_from_idle(void); ++ ++#else /* !CONFIG_SMP */ ++static inline void flush_smp_call_function_from_idle(void) { } ++#endif ++ ++#ifndef arch_scale_freq_tick ++static __always_inline ++void arch_scale_freq_tick(void) ++{ ++} ++#endif ++ ++#ifndef arch_scale_freq_capacity ++static __always_inline ++unsigned long 
arch_scale_freq_capacity(int cpu) ++{ ++ return SCHED_CAPACITY_SCALE; ++} ++#endif ++ ++static inline u64 __rq_clock_broken(struct rq *rq) ++{ ++ return READ_ONCE(rq->clock); ++} ++ ++static inline u64 rq_clock(struct rq *rq) ++{ ++ /* ++ * Relax lockdep_assert_held() checking as in VRQ, call to ++ * sched_info_xxxx() may not held rq->lock ++ * lockdep_assert_held(&rq->lock); ++ */ ++ return rq->clock; ++} ++ ++static inline u64 rq_clock_task(struct rq *rq) ++{ ++ /* ++ * Relax lockdep_assert_held() checking as in VRQ, call to ++ * sched_info_xxxx() may not held rq->lock ++ * lockdep_assert_held(&rq->lock); ++ */ ++ return rq->clock_task; ++} ++ ++/* ++ * {de,en}queue flags: ++ * ++ * DEQUEUE_SLEEP - task is no longer runnable ++ * ENQUEUE_WAKEUP - task just became runnable ++ * ++ */ ++ ++#define DEQUEUE_SLEEP 0x01 ++ ++#define ENQUEUE_WAKEUP 0x01 ++ ++ ++/* ++ * Below are scheduler API which using in other kernel code ++ * It use the dummy rq_flags ++ * ToDo : BMQ need to support these APIs for compatibility with mainline ++ * scheduler code. ++ */ ++struct rq_flags { ++ unsigned long flags; ++}; ++ ++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) ++ __acquires(rq->lock); ++ ++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) ++ __acquires(p->pi_lock) ++ __acquires(rq->lock); ++ ++static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) ++ __releases(rq->lock) ++{ ++ raw_spin_unlock(&rq->lock); ++} ++ ++static inline void ++task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) ++ __releases(rq->lock) ++ __releases(p->pi_lock) ++{ ++ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); ++} ++ ++static inline void ++rq_lock(struct rq *rq, struct rq_flags *rf) ++ __acquires(rq->lock) ++{ ++ raw_spin_lock(&rq->lock); ++} ++ ++static inline void ++rq_unlock_irq(struct rq *rq, struct rq_flags *rf) ++ __releases(rq->lock) ++{ ++ raw_spin_unlock_irq(&rq->lock); ++} ++ ++static inline void ++rq_unlock(struct rq *rq, struct rq_flags *rf) ++ __releases(rq->lock) ++{ ++ raw_spin_unlock(&rq->lock); ++} ++ ++static inline struct rq * ++this_rq_lock_irq(struct rq_flags *rf) ++ __acquires(rq->lock) ++{ ++ struct rq *rq; ++ ++ local_irq_disable(); ++ rq = this_rq(); ++ raw_spin_lock(&rq->lock); ++ ++ return rq; ++} ++ ++static inline int task_current(struct rq *rq, struct task_struct *p) ++{ ++ return rq->curr == p; ++} ++ ++static inline bool task_running(struct task_struct *p) ++{ ++ return p->on_cpu; ++} ++ ++extern int task_running_nice(struct task_struct *p); ++ ++extern struct static_key_false sched_schedstats; ++ ++#ifdef CONFIG_CPU_IDLE ++static inline void idle_set_state(struct rq *rq, ++ struct cpuidle_state *idle_state) ++{ ++ rq->idle_state = idle_state; ++} ++ ++static inline struct cpuidle_state *idle_get_state(struct rq *rq) ++{ ++ WARN_ON(!rcu_read_lock_held()); ++ return rq->idle_state; ++} ++#else ++static inline void idle_set_state(struct rq *rq, ++ struct cpuidle_state *idle_state) ++{ ++} ++ ++static inline struct cpuidle_state *idle_get_state(struct rq *rq) ++{ ++ return NULL; ++} ++#endif ++ ++static inline int cpu_of(const struct rq *rq) ++{ ++#ifdef CONFIG_SMP ++ return rq->cpu; ++#else ++ return 0; ++#endif ++} ++ ++#include "stats.h" ++ ++#ifdef CONFIG_NO_HZ_COMMON ++#define NOHZ_BALANCE_KICK_BIT 0 ++#define NOHZ_STATS_KICK_BIT 1 ++#define NOHZ_NEWILB_KICK_BIT 2 ++ ++#define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) ++#define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) 
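
The rq_flags lock helpers quoted above keep the shape of the mainline locking API so that code outside the scheduler keeps compiling unchanged. A minimal sketch of the usual calling pattern, using only the declarations above (illustrative only; read_task_clock_sample() is a made-up name, not part of the patch):

    static u64 read_task_clock_sample(struct task_struct *p)
    {
            struct rq_flags rf;
            struct rq *rq;
            u64 clock;

            /* task_rq_lock() takes p->pi_lock (irqsave into rf) and rq->lock */
            rq = task_rq_lock(p, &rf);
            /* safe to read the task clock while rq->lock is held */
            clock = rq_clock_task(rq);
            /* drops rq->lock, then restores irqs from rf.flags with p->pi_lock */
            task_rq_unlock(rq, p, &rf);

            return clock;
    }
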
++#define NOHZ_NEWILB_KICK BIT(NOHZ_NEWILB_KICK_BIT) ++ ++#define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK) ++ ++#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) ++ ++/* TODO: needed? ++extern void nohz_balance_exit_idle(struct rq *rq); ++#else ++static inline void nohz_balance_exit_idle(struct rq *rq) { } ++*/ ++#endif ++ ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++struct irqtime { ++ u64 total; ++ u64 tick_delta; ++ u64 irq_start_time; ++ struct u64_stats_sync sync; ++}; ++ ++DECLARE_PER_CPU(struct irqtime, cpu_irqtime); ++ ++/* ++ * Returns the irqtime minus the softirq time computed by ksoftirqd. ++ * Otherwise ksoftirqd's sum_exec_runtime is substracted its own runtime ++ * and never move forward. ++ */ ++static inline u64 irq_time_read(int cpu) ++{ ++ struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); ++ unsigned int seq; ++ u64 total; ++ ++ do { ++ seq = __u64_stats_fetch_begin(&irqtime->sync); ++ total = irqtime->total; ++ } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); ++ ++ return total; ++} ++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ ++ ++#ifdef CONFIG_CPU_FREQ ++DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); ++ ++/** ++ * cpufreq_update_util - Take a note about CPU utilization changes. ++ * @rq: Runqueue to carry out the update for. ++ * @flags: Update reason flags. ++ * ++ * This function is called by the scheduler on the CPU whose utilization is ++ * being updated. ++ * ++ * It can only be called from RCU-sched read-side critical sections. ++ * ++ * The way cpufreq is currently arranged requires it to evaluate the CPU ++ * performance state (frequency/voltage) on a regular basis to prevent it from ++ * being stuck in a completely inadequate performance level for too long. ++ * That is not guaranteed to happen if the updates are only triggered from CFS ++ * and DL, though, because they may not be coming in if only RT tasks are ++ * active all the time (or there are RT tasks only). ++ * ++ * As a workaround for that issue, this function is called periodically by the ++ * RT sched class to trigger extra cpufreq updates to prevent it from stalling, ++ * but that really is a band-aid. Going forward it should be replaced with ++ * solutions targeted more specifically at RT tasks. ++ */ ++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) ++{ ++ struct update_util_data *data; ++ ++ data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, ++ cpu_of(rq))); ++ if (data) ++ data->func(data, rq_clock(rq), flags); ++} ++#else ++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} ++#endif /* CONFIG_CPU_FREQ */ ++ ++#ifdef CONFIG_NO_HZ_FULL ++extern int __init sched_tick_offload_init(void); ++#else ++static inline int sched_tick_offload_init(void) { return 0; } ++#endif ++ ++#ifdef arch_scale_freq_capacity ++#ifndef arch_scale_freq_invariant ++#define arch_scale_freq_invariant() (true) ++#endif ++#else /* arch_scale_freq_capacity */ ++#define arch_scale_freq_invariant() (false) ++#endif ++ ++#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) ++extern void nohz_run_idle_balance(int cpu); ++#else ++static inline void nohz_run_idle_balance(int cpu) { } ++#endif ++ ++extern void schedule_idle(void); ++ ++#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT) ++ ++/* ++ * !! For sched_setattr_nocheck() (kernel) only !! ++ * ++ * This is actually gross. :( ++ * ++ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE ++ * tasks, but still be able to sleep. 
We need this on platforms that cannot ++ * atomically change clock frequency. Remove once fast switching will be ++ * available on such platforms. ++ * ++ * SUGOV stands for SchedUtil GOVernor. ++ */ ++#define SCHED_FLAG_SUGOV 0x10000000 ++ ++#ifdef CONFIG_MEMBARRIER ++/* ++ * The scheduler provides memory barriers required by membarrier between: ++ * - prior user-space memory accesses and store to rq->membarrier_state, ++ * - store to rq->membarrier_state and following user-space memory accesses. ++ * In the same way it provides those guarantees around store to rq->curr. ++ */ ++static inline void membarrier_switch_mm(struct rq *rq, ++ struct mm_struct *prev_mm, ++ struct mm_struct *next_mm) ++{ ++ int membarrier_state; ++ ++ if (prev_mm == next_mm) ++ return; ++ ++ membarrier_state = atomic_read(&next_mm->membarrier_state); ++ if (READ_ONCE(rq->membarrier_state) == membarrier_state) ++ return; ++ ++ WRITE_ONCE(rq->membarrier_state, membarrier_state); ++} ++#else ++static inline void membarrier_switch_mm(struct rq *rq, ++ struct mm_struct *prev_mm, ++ struct mm_struct *next_mm) ++{ ++} ++#endif ++ ++#ifdef CONFIG_NUMA ++extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu); ++#else ++static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu) ++{ ++ return nr_cpu_ids; ++} ++#endif ++ ++void swake_up_all_locked(struct swait_queue_head *q); ++void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); ++ ++#endif /* ALT_SCHED_H */ +diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h +new file mode 100644 +index 000000000000..aba3c98759f8 +--- /dev/null ++++ b/kernel/sched/bmq.h +@@ -0,0 +1,14 @@ ++#ifndef BMQ_H ++#define BMQ_H ++ ++/* bits: ++ * RT(0-99), (Low prio adj range, nice width, high prio adj range) / 2, cpu idle task */ ++#define SCHED_BITS (MAX_RT_PRIO + NICE_WIDTH / 2 + MAX_PRIORITY_ADJ + 1) ++#define IDLE_TASK_SCHED_PRIO (SCHED_BITS - 1) ++ ++struct bmq { ++ DECLARE_BITMAP(bitmap, SCHED_BITS); ++ struct list_head heads[SCHED_BITS]; ++}; ++ ++#endif +diff --git a/kernel/sched/bmq_imp.h b/kernel/sched/bmq_imp.h +new file mode 100644 +index 000000000000..7c71f1141d00 +--- /dev/null ++++ b/kernel/sched/bmq_imp.h +@@ -0,0 +1,203 @@ ++#define ALT_SCHED_VERSION_MSG "sched/bmq: BMQ CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n" ++ ++/* ++ * BMQ only routines ++ */ ++#define rq_switch_time(rq) ((rq)->clock - (rq)->last_ts_switch) ++#define boost_threshold(p) (sched_timeslice_ns >>\ ++ (15 - MAX_PRIORITY_ADJ - (p)->boost_prio)) ++ ++static inline void boost_task(struct task_struct *p) ++{ ++ int limit; ++ ++ switch (p->policy) { ++ case SCHED_NORMAL: ++ limit = -MAX_PRIORITY_ADJ; ++ break; ++ case SCHED_BATCH: ++ case SCHED_IDLE: ++ limit = 0; ++ break; ++ default: ++ return; ++ } ++ ++ if (p->boost_prio > limit) ++ p->boost_prio--; ++} ++ ++static inline void deboost_task(struct task_struct *p) ++{ ++ if (p->boost_prio < MAX_PRIORITY_ADJ) ++ p->boost_prio++; ++} ++ ++/* ++ * Common interfaces ++ */ ++static inline int normal_prio(struct task_struct *p) ++{ ++ if (task_has_rt_policy(p)) ++ return MAX_RT_PRIO - 1 - p->rt_priority; ++ ++ return p->static_prio + MAX_PRIORITY_ADJ; ++} ++ ++static inline int task_sched_prio(struct task_struct *p, struct rq *rq) ++{ ++ return (p->prio < MAX_RT_PRIO)? 
p->prio : MAX_RT_PRIO / 2 + (p->prio + p->boost_prio) / 2; ++} ++ ++static inline void requeue_task(struct task_struct *p, struct rq *rq); ++ ++static inline void time_slice_expired(struct task_struct *p, struct rq *rq) ++{ ++ p->time_slice = sched_timeslice_ns; ++ ++ if (SCHED_FIFO != p->policy && task_on_rq_queued(p)) { ++ if (SCHED_RR != p->policy) ++ deboost_task(p); ++ requeue_task(p, rq); ++ } ++} ++ ++inline int task_running_nice(struct task_struct *p) ++{ ++ return (p->prio + p->boost_prio > DEFAULT_PRIO + MAX_PRIORITY_ADJ); ++} ++ ++static inline void update_task_priodl(struct task_struct *p) {} ++ ++static inline unsigned long sched_queue_watermark(struct rq *rq) ++{ ++ return find_first_bit(rq->queue.bitmap, SCHED_BITS); ++} ++ ++static inline void sched_queue_init(struct rq *rq) ++{ ++ struct bmq *q = &rq->queue; ++ int i; ++ ++ bitmap_zero(q->bitmap, SCHED_BITS); ++ for(i = 0; i < SCHED_BITS; i++) ++ INIT_LIST_HEAD(&q->heads[i]); ++} ++ ++static inline void sched_queue_init_idle(struct rq *rq, struct task_struct *idle) ++{ ++ struct bmq *q = &rq->queue; ++ ++ idle->bmq_idx = IDLE_TASK_SCHED_PRIO; ++ INIT_LIST_HEAD(&q->heads[idle->bmq_idx]); ++ list_add(&idle->bmq_node, &q->heads[idle->bmq_idx]); ++ set_bit(idle->bmq_idx, q->bitmap); ++} ++ ++/* ++ * This routine used in bmq scheduler only which assume the idle task in the bmq ++ */ ++static inline struct task_struct *sched_rq_first_task(struct rq *rq) ++{ ++ unsigned long idx = find_first_bit(rq->queue.bitmap, SCHED_BITS); ++ const struct list_head *head = &rq->queue.heads[idx]; ++ ++ return list_first_entry(head, struct task_struct, bmq_node); ++} ++ ++static inline struct task_struct * ++sched_rq_next_task(struct task_struct *p, struct rq *rq) ++{ ++ unsigned long idx = p->bmq_idx; ++ struct list_head *head = &rq->queue.heads[idx]; ++ ++ if (list_is_last(&p->bmq_node, head)) { ++ idx = find_next_bit(rq->queue.bitmap, SCHED_BITS, idx + 1); ++ head = &rq->queue.heads[idx]; ++ ++ return list_first_entry(head, struct task_struct, bmq_node); ++ } ++ ++ return list_next_entry(p, bmq_node); ++} ++ ++#define __SCHED_DEQUEUE_TASK(p, rq, flags, func) \ ++ psi_dequeue(p, flags & DEQUEUE_SLEEP); \ ++ sched_info_dequeued(rq, p); \ ++ \ ++ list_del(&p->bmq_node); \ ++ if (list_empty(&rq->queue.heads[p->bmq_idx])) { \ ++ clear_bit(p->bmq_idx, rq->queue.bitmap);\ ++ func; \ ++ } ++ ++#define __SCHED_ENQUEUE_TASK(p, rq, flags) \ ++ sched_info_queued(rq, p); \ ++ psi_enqueue(p, flags); \ ++ \ ++ p->bmq_idx = task_sched_prio(p, rq); \ ++ list_add_tail(&p->bmq_node, &rq->queue.heads[p->bmq_idx]); \ ++ set_bit(p->bmq_idx, rq->queue.bitmap) ++ ++#define __SCHED_REQUEUE_TASK(p, rq, func) \ ++{ \ ++ int idx = task_sched_prio(p, rq); \ ++\ ++ list_del(&p->bmq_node); \ ++ list_add_tail(&p->bmq_node, &rq->queue.heads[idx]); \ ++ if (idx != p->bmq_idx) { \ ++ if (list_empty(&rq->queue.heads[p->bmq_idx])) \ ++ clear_bit(p->bmq_idx, rq->queue.bitmap); \ ++ p->bmq_idx = idx; \ ++ set_bit(p->bmq_idx, rq->queue.bitmap); \ ++ func; \ ++ } \ ++} ++ ++static inline bool sched_task_need_requeue(struct task_struct *p, struct rq *rq) ++{ ++ return (task_sched_prio(p, rq) != p->bmq_idx); ++} ++ ++static void sched_task_fork(struct task_struct *p, struct rq *rq) ++{ ++ p->boost_prio = (p->boost_prio < 0) ? ++ p->boost_prio + MAX_PRIORITY_ADJ : MAX_PRIORITY_ADJ; ++} ++ ++/** ++ * task_prio - return the priority value of a given task. ++ * @p: the task in question. ++ * ++ * Return: The priority value as seen by users in /proc. 
++ * ++ * sched policy return value kernel prio user prio/nice/boost ++ * ++ * normal, batch, idle [0 ... 53] [100 ... 139] 0/[-20 ... 19]/[-7 ... 7] ++ * fifo, rr [-1 ... -100] [99 ... 0] [0 ... 99] ++ */ ++int task_prio(const struct task_struct *p) ++{ ++ if (p->prio < MAX_RT_PRIO) ++ return (p->prio - MAX_RT_PRIO); ++ return (p->prio - MAX_RT_PRIO + p->boost_prio); ++} ++ ++static void do_sched_yield_type_1(struct task_struct *p, struct rq *rq) ++{ ++ p->boost_prio = MAX_PRIORITY_ADJ; ++} ++ ++#ifdef CONFIG_SMP ++static void sched_task_ttwu(struct task_struct *p) ++{ ++ if(this_rq()->clock_task - p->last_ran > sched_timeslice_ns) ++ boost_task(p); ++} ++#endif ++ ++static void sched_task_deactivate(struct task_struct *p, struct rq *rq) ++{ ++ if (rq_switch_time(rq) < boost_threshold(p)) ++ boost_task(p); ++} +diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c +index 50cbad89f7fa..41946f19468b 100644 +--- a/kernel/sched/cpufreq_schedutil.c ++++ b/kernel/sched/cpufreq_schedutil.c +@@ -57,6 +57,13 @@ struct sugov_cpu { + unsigned long bw_dl; + unsigned long max; + ++#ifdef CONFIG_SCHED_ALT ++ /* For genenal cpu load util */ ++ s32 load_history; ++ u64 load_block; ++ u64 load_stamp; ++#endif ++ + /* The field below is for single-CPU policies only: */ + #ifdef CONFIG_NO_HZ_COMMON + unsigned long saved_idle_calls; +@@ -171,6 +178,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy, + return cpufreq_driver_resolve_freq(policy, freq); + } + ++#ifndef CONFIG_SCHED_ALT + static void sugov_get_util(struct sugov_cpu *sg_cpu) + { + struct rq *rq = cpu_rq(sg_cpu->cpu); +@@ -182,6 +190,55 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu) + FREQUENCY_UTIL, NULL); + } + ++#else /* CONFIG_SCHED_ALT */ ++ ++#define SG_CPU_LOAD_HISTORY_BITS (sizeof(s32) * 8ULL) ++#define SG_CPU_UTIL_SHIFT (8) ++#define SG_CPU_LOAD_HISTORY_SHIFT (SG_CPU_LOAD_HISTORY_BITS - 1 - SG_CPU_UTIL_SHIFT) ++#define SG_CPU_LOAD_HISTORY_TO_UTIL(l) (((l) >> SG_CPU_LOAD_HISTORY_SHIFT) & 0xff) ++ ++#define LOAD_BLOCK(t) ((t) >> 17) ++#define LOAD_HALF_BLOCK(t) ((t) >> 16) ++#define BLOCK_MASK(t) ((t) & ((0x01 << 18) - 1)) ++#define LOAD_BLOCK_BIT(b) (1UL << (SG_CPU_LOAD_HISTORY_BITS - 1 - (b))) ++#define CURRENT_LOAD_BIT LOAD_BLOCK_BIT(0) ++ ++static void sugov_get_util(struct sugov_cpu *sg_cpu) ++{ ++ unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu); ++ ++ sg_cpu->max = max; ++ sg_cpu->bw_dl = 0; ++ sg_cpu->util = SG_CPU_LOAD_HISTORY_TO_UTIL(sg_cpu->load_history) * ++ (max >> SG_CPU_UTIL_SHIFT); ++} ++ ++static inline void sugov_cpu_load_update(struct sugov_cpu *sg_cpu, u64 time) ++{ ++ u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(sg_cpu->load_stamp), ++ SG_CPU_LOAD_HISTORY_BITS - 1); ++ u64 prev = !!(sg_cpu->load_history & CURRENT_LOAD_BIT); ++ u64 curr = !!cpu_rq(sg_cpu->cpu)->nr_running; ++ ++ if (delta) { ++ sg_cpu->load_history = sg_cpu->load_history >> delta; ++ ++ if (delta <= SG_CPU_UTIL_SHIFT) { ++ sg_cpu->load_block += (~BLOCK_MASK(sg_cpu->load_stamp)) * prev; ++ if (!!LOAD_HALF_BLOCK(sg_cpu->load_block) ^ curr) ++ sg_cpu->load_history ^= LOAD_BLOCK_BIT(delta); ++ } ++ ++ sg_cpu->load_block = BLOCK_MASK(time) * prev; ++ } else { ++ sg_cpu->load_block += (time - sg_cpu->load_stamp) * prev; ++ } ++ if (prev ^ curr) ++ sg_cpu->load_history ^= CURRENT_LOAD_BIT; ++ sg_cpu->load_stamp = time; ++} ++#endif /* CONFIG_SCHED_ALT */ ++ + /** + * sugov_iowait_reset() - Reset the IO boost status of a CPU. 
+ * @sg_cpu: the sugov data for the CPU to boost +@@ -322,13 +379,19 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; } + */ + static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu) + { ++#ifndef CONFIG_SCHED_ALT + if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl) + sg_cpu->sg_policy->limits_changed = true; ++#endif + } + + static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu, + u64 time, unsigned int flags) + { ++#ifdef CONFIG_SCHED_ALT ++ sugov_cpu_load_update(sg_cpu, time); ++#endif /* CONFIG_SCHED_ALT */ ++ + sugov_iowait_boost(sg_cpu, time, flags); + sg_cpu->last_update = time; + +@@ -446,6 +509,10 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags) + + raw_spin_lock(&sg_policy->update_lock); + ++#ifdef CONFIG_SCHED_ALT ++ sugov_cpu_load_update(sg_cpu, time); ++#endif /* CONFIG_SCHED_ALT */ ++ + sugov_iowait_boost(sg_cpu, time, flags); + sg_cpu->last_update = time; + +@@ -603,6 +670,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy) + } + + ret = sched_setattr_nocheck(thread, &attr); ++ + if (ret) { + kthread_stop(thread); + pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__); +@@ -837,7 +905,9 @@ cpufreq_governor_init(schedutil_gov); + #ifdef CONFIG_ENERGY_MODEL + static void rebuild_sd_workfn(struct work_struct *work) + { ++#ifndef CONFIG_SCHED_ALT + rebuild_sched_domains_energy(); ++#endif /* CONFIG_SCHED_ALT */ + } + static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn); + +diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c +index 5f611658eeab..631276f56ba0 100644 +--- a/kernel/sched/cputime.c ++++ b/kernel/sched/cputime.c +@@ -123,7 +123,7 @@ void account_user_time(struct task_struct *p, u64 cputime) + p->utime += cputime; + account_group_user_time(p, cputime); + +- index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; ++ index = task_running_nice(p) ? CPUTIME_NICE : CPUTIME_USER; + + /* Add user time to cpustat. */ + task_group_account_field(p, index, cputime); +@@ -147,7 +147,7 @@ void account_guest_time(struct task_struct *p, u64 cputime) + p->gtime += cputime; + + /* Add guest time to cpustat. 
*/ +- if (task_nice(p) > 0) { ++ if (task_running_nice(p)) { + cpustat[CPUTIME_NICE] += cputime; + cpustat[CPUTIME_GUEST_NICE] += cputime; + } else { +@@ -270,7 +270,7 @@ static inline u64 account_other_time(u64 max) + #ifdef CONFIG_64BIT + static inline u64 read_sum_exec_runtime(struct task_struct *t) + { +- return t->se.sum_exec_runtime; ++ return tsk_seruntime(t); + } + #else + static u64 read_sum_exec_runtime(struct task_struct *t) +@@ -280,7 +280,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t) + struct rq *rq; + + rq = task_rq_lock(t, &rf); +- ns = t->se.sum_exec_runtime; ++ ns = tsk_seruntime(t); + task_rq_unlock(rq, t, &rf); + + return ns; +@@ -612,7 +612,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, + void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st) + { + struct task_cputime cputime = { +- .sum_exec_runtime = p->se.sum_exec_runtime, ++ .sum_exec_runtime = tsk_seruntime(p), + }; + + task_cputime(p, &cputime.utime, &cputime.stime); +diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c +index 7199e6f23789..bbdd227da3a4 100644 +--- a/kernel/sched/idle.c ++++ b/kernel/sched/idle.c +@@ -261,12 +261,6 @@ exit_idle: + static void do_idle(void) + { + int cpu = smp_processor_id(); +- +- /* +- * Check if we need to update blocked load +- */ +- nohz_run_idle_balance(cpu); +- + /* + * If the arch has a polling bit, we maintain an invariant: + * +@@ -397,6 +397,7 @@ void cpu_startup_entry(enum cpuhp_state state) + do_idle(); + } + ++#ifndef CONFIG_SCHED_ALT + /* + * idle-task scheduling class. + */ +@@ -510,3 +511,4 @@ DEFINE_SCHED_CLASS(idle) = { + .switched_to = switched_to_idle, + .update_curr = update_curr_idle, + }; ++#endif +diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h +new file mode 100644 +index 000000000000..623908cf4380 +--- /dev/null ++++ b/kernel/sched/pds.h +@@ -0,0 +1,9 @@ ++#ifndef PDS_H ++#define PDS_H ++ ++/* bits: ++ * RT(0-99), (Low prio adj range, nice width, high prio adj range) / 2, cpu idle task */ ++#define SCHED_BITS (MAX_RT_PRIO + NICE_WIDTH / 2 + 1) ++#define IDLE_TASK_SCHED_PRIO (SCHED_BITS - 1) ++ ++#endif +diff --git a/kernel/sched/pds_imp.h b/kernel/sched/pds_imp.h +new file mode 100644 +index 000000000000..335ce3a8e3ec +--- /dev/null ++++ b/kernel/sched/pds_imp.h +@@ -0,0 +1,279 @@ ++#define ALT_SCHED_VERSION_MSG "sched/pds: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n" ++ ++static const u64 user_prio2deadline[NICE_WIDTH] = { ++/* -20 */ 4194304, 4613734, 5075107, 5582617, 6140878, ++/* -15 */ 6754965, 7430461, 8173507, 8990857, 9889942, ++/* -10 */ 10878936, 11966829, 13163511, 14479862, 15927848, ++/* -5 */ 17520632, 19272695, 21199964, 23319960, 25651956, ++/* 0 */ 28217151, 31038866, 34142752, 37557027, 41312729, ++/* 5 */ 45444001, 49988401, 54987241, 60485965, 66534561, ++/* 10 */ 73188017, 80506818, 88557499, 97413248, 107154572, ++/* 15 */ 117870029, 129657031, 142622734, 156885007, 172573507 ++}; ++ ++static const unsigned char dl_level_map[] = { ++/* 0 4 8 12 */ ++ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 18, ++/* 16 20 24 28 */ ++ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 17, 17, 17, 17, 17, ++/* 32 36 40 44 */ ++ 17, 17, 17, 17, 16, 16, 16, 16, 16, 16, 16, 16, 15, 15, 15, 15, ++/* 48 52 56 60 */ ++ 15, 15, 15, 14, 14, 14, 14, 14, 14, 13, 13, 13, 13, 12, 12, 12, ++/* 64 68 72 76 */ ++ 12, 11, 11, 11, 10, 10, 10, 9, 9, 8, 7, 6, 5, 4, 3, 2, ++/* 80 84 88 92 */ ++ 1, 0 ++}; ++ ++/* DEFAULT_SCHED_PRIO: ++ * dl_level_map[(user_prio2deadline[39] - 
user_prio2deadline[0]) >> 21] = ++ * dl_level_map[68] = ++ * 10 ++ */ ++#define DEFAULT_SCHED_PRIO (MAX_RT_PRIO + 10) ++ ++static inline int normal_prio(struct task_struct *p) ++{ ++ if (task_has_rt_policy(p)) ++ return MAX_RT_PRIO - 1 - p->rt_priority; ++ ++ return MAX_RT_PRIO; ++} ++ ++static inline int ++task_sched_prio(const struct task_struct *p, const struct rq *rq) ++{ ++ size_t delta; ++ ++ if (p == rq->idle) ++ return IDLE_TASK_SCHED_PRIO; ++ ++ if (p->prio < MAX_RT_PRIO) ++ return p->prio; ++ ++ delta = (rq->clock + user_prio2deadline[39] - p->deadline) >> 21; ++ delta = min((size_t)delta, ARRAY_SIZE(dl_level_map) - 1); ++ ++ return MAX_RT_PRIO + dl_level_map[delta]; ++} ++ ++int task_running_nice(struct task_struct *p) ++{ ++ return task_sched_prio(p, task_rq(p)) > DEFAULT_SCHED_PRIO; ++} ++ ++static inline void update_task_priodl(struct task_struct *p) ++{ ++ p->priodl = (((u64) (p->prio))<<56) | ((p->deadline)>>8); ++} ++ ++static inline void requeue_task(struct task_struct *p, struct rq *rq); ++ ++static inline void time_slice_expired(struct task_struct *p, struct rq *rq) ++{ ++ /*printk(KERN_INFO "sched: time_slice_expired(%d) - %px\n", cpu_of(rq), p);*/ ++ p->time_slice = sched_timeslice_ns; ++ ++ if (p->prio >= MAX_RT_PRIO) ++ p->deadline = rq->clock + ++ user_prio2deadline[p->static_prio - MAX_RT_PRIO]; ++ update_task_priodl(p); ++ ++ if (SCHED_FIFO != p->policy && task_on_rq_queued(p)) ++ requeue_task(p, rq); ++} ++ ++/* ++ * pds_skiplist_task_search -- search function used in PDS run queue skip list ++ * node insert operation. ++ * @it: iterator pointer to the node in the skip list ++ * @node: pointer to the skiplist_node to be inserted ++ * ++ * Returns true if key of @it is less or equal to key value of @node, otherwise ++ * false. 
++ */ ++static inline bool ++pds_skiplist_task_search(struct skiplist_node *it, struct skiplist_node *node) ++{ ++ return (skiplist_entry(it, struct task_struct, sl_node)->priodl <= ++ skiplist_entry(node, struct task_struct, sl_node)->priodl); ++} ++ ++/* ++ * Define the skip list insert function for PDS ++ */ ++DEFINE_SKIPLIST_INSERT_FUNC(pds_skiplist_insert, pds_skiplist_task_search); ++ ++/* ++ * Init the queue structure in rq ++ */ ++static inline void sched_queue_init(struct rq *rq) ++{ ++ INIT_SKIPLIST_NODE(&rq->sl_header); ++} ++ ++/* ++ * Init idle task and put into queue structure of rq ++ * IMPORTANT: may be called multiple times for a single cpu ++ */ ++static inline void sched_queue_init_idle(struct rq *rq, struct task_struct *idle) ++{ ++ /*printk(KERN_INFO "sched: init(%d) - %px\n", cpu_of(rq), idle);*/ ++ int default_prio = idle->prio; ++ ++ idle->prio = MAX_PRIO; ++ idle->deadline = 0ULL; ++ update_task_priodl(idle); ++ ++ INIT_SKIPLIST_NODE(&rq->sl_header); ++ ++ idle->sl_node.level = idle->sl_level; ++ pds_skiplist_insert(&rq->sl_header, &idle->sl_node); ++ ++ idle->prio = default_prio; ++} ++ ++/* ++ * This routine assume that the idle task always in queue ++ */ ++static inline struct task_struct *sched_rq_first_task(struct rq *rq) ++{ ++ struct skiplist_node *node = rq->sl_header.next[0]; ++ ++ BUG_ON(node == &rq->sl_header); ++ return skiplist_entry(node, struct task_struct, sl_node); ++} ++ ++static inline struct task_struct * ++sched_rq_next_task(struct task_struct *p, struct rq *rq) ++{ ++ struct skiplist_node *next = p->sl_node.next[0]; ++ ++ BUG_ON(next == &rq->sl_header); ++ return skiplist_entry(next, struct task_struct, sl_node); ++} ++ ++static inline unsigned long sched_queue_watermark(struct rq *rq) ++{ ++ return task_sched_prio(sched_rq_first_task(rq), rq); ++} ++ ++#define __SCHED_DEQUEUE_TASK(p, rq, flags, func) \ ++ psi_dequeue(p, flags & DEQUEUE_SLEEP); \ ++ sched_info_dequeued(rq, p); \ ++ \ ++ if (skiplist_del_init(&rq->sl_header, &p->sl_node)) { \ ++ func; \ ++ } ++ ++#define __SCHED_ENQUEUE_TASK(p, rq, flags) \ ++ sched_info_queued(rq, p); \ ++ psi_enqueue(p, flags); \ ++ \ ++ p->sl_node.level = p->sl_level; \ ++ pds_skiplist_insert(&rq->sl_header, &p->sl_node) ++ ++/* ++ * Requeue a task @p to @rq ++ */ ++#define __SCHED_REQUEUE_TASK(p, rq, func) \ ++{\ ++ bool b_first = skiplist_del_init(&rq->sl_header, &p->sl_node); \ ++\ ++ p->sl_node.level = p->sl_level; \ ++ if (pds_skiplist_insert(&rq->sl_header, &p->sl_node) || b_first) { \ ++ func; \ ++ } \ ++} ++ ++static inline bool sched_task_need_requeue(struct task_struct *p, struct rq *rq) ++{ ++ struct skiplist_node *node; ++ ++ node = p->sl_node.prev[0]; ++ if (node != &rq->sl_header && ++ skiplist_entry(node, struct task_struct, sl_node)->priodl > p->priodl) ++ return true; ++ ++ node = p->sl_node.next[0]; ++ if (node != &rq->sl_header && ++ skiplist_entry(node, struct task_struct, sl_node)->priodl < p->priodl) ++ return true; ++ ++ return false; ++} ++ ++/* ++ * pds_skiplist_random_level -- Returns a pseudo-random level number for skip ++ * list node which is used in PDS run queue. ++ * ++ * __ffs() is used to satisfy p = 0.5 between each levels, and there should be ++ * platform instruction(known as ctz/clz) for acceleration. ++ * ++ * The skiplist level for a task is populated when task is created and doesn't ++ * change in task's life time. 
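
For the deadline-to-priority mapping defined earlier in this file, a worked example may help; the figures below simply plug the quoted user_prio2deadline[] and dl_level_map[] tables into task_sched_prio() for a freshly refilled nice-0 task (static_prio 120; illustrative arithmetic, not part of the patch):

    p->deadline = rq->clock + user_prio2deadline[120 - MAX_RT_PRIO]   /* index 20 -> 28217151 */
    delta       = (rq->clock + user_prio2deadline[39] - p->deadline) >> 21
                = (172573507 - 28217151) >> 21  =  144356356 >> 21  =  68
    prio        = MAX_RT_PRIO + dl_level_map[68]  =  100 + 10  =  110  =  DEFAULT_SCHED_PRIO
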
When task is being inserted into run queue, this ++ * skiplist level is set to task's sl_node->level, the skiplist insert function ++ * may change it based on current level of the skip lsit. ++ */ ++static inline int pds_skiplist_random_level(const struct task_struct *p) ++{ ++ /* ++ * 1. Some architectures don't have better than microsecond resolution ++ * so mask out ~microseconds as a factor of the random seed for skiplist ++ * insertion. ++ * 2. Use address of task structure pointer as another factor of the ++ * random seed for task burst forking scenario. ++ */ ++ unsigned long randseed = (task_rq(p)->clock ^ (unsigned long)p) >> 10; ++ ++ randseed &= __GENMASK(NUM_SKIPLIST_LEVEL - 1, 0); ++ if (randseed) ++ return __ffs(randseed); ++ ++ return (NUM_SKIPLIST_LEVEL - 1); ++} ++ ++static void sched_task_fork(struct task_struct *p, struct rq *rq) ++{ ++ p->sl_level = pds_skiplist_random_level(p); ++ if (p->prio >= MAX_RT_PRIO) ++ p->deadline = rq->clock + ++ user_prio2deadline[p->static_prio - MAX_RT_PRIO]; ++ update_task_priodl(p); ++} ++ ++/** ++ * task_prio - return the priority value of a given task. ++ * @p: the task in question. ++ * ++ * Return: The priority value as seen by users in /proc. ++ * ++ * sched policy return value kernel prio user prio/nice ++ * ++ * normal, batch, idle [0 ... 39] 100 0/[-20 ... 19] ++ * fifo, rr [-1 ... -100] [99 ... 0] [0 ... 99] ++ */ ++int task_prio(const struct task_struct *p) ++{ ++ int ret; ++ ++ if (p->prio < MAX_RT_PRIO) ++ return (p->prio - MAX_RT_PRIO); ++ ++ preempt_disable(); ++ ret = task_sched_prio(p, this_rq()) - MAX_RT_PRIO; ++ preempt_enable(); ++ ++ return ret; ++} ++ ++static void do_sched_yield_type_1(struct task_struct *p, struct rq *rq) ++{ ++ time_slice_expired(p, rq); ++} ++ ++#ifdef CONFIG_SMP ++static void sched_task_ttwu(struct task_struct *p) {} ++#endif ++static void sched_task_deactivate(struct task_struct *p, struct rq *rq) {} +diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c +index 2c613e1cff3a..0103b2a7201d 100644 +--- a/kernel/sched/pelt.c ++++ b/kernel/sched/pelt.c +@@ -270,6 +270,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load) + WRITE_ONCE(sa->util_avg, sa->util_sum / divider); + } + ++#ifndef CONFIG_SCHED_ALT + /* + * sched_entity: + * +@@ -387,8 +388,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running) + + return 0; + } ++#endif + +-#ifdef CONFIG_SCHED_THERMAL_PRESSURE ++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT) + /* + * thermal: + * +diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h +index 795e43e02afc..856163dac896 100644 +--- a/kernel/sched/pelt.h ++++ b/kernel/sched/pelt.h +@@ -1,13 +1,15 @@ + #ifdef CONFIG_SMP + #include "sched-pelt.h" + ++#ifndef CONFIG_SCHED_ALT + int __update_load_avg_blocked_se(u64 now, struct sched_entity *se); + int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se); + int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq); + int update_rt_rq_load_avg(u64 now, struct rq *rq, int running); + int update_dl_rq_load_avg(u64 now, struct rq *rq, int running); ++#endif + +-#ifdef CONFIG_SCHED_THERMAL_PRESSURE ++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT) + int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity); + + static inline u64 thermal_load_avg(struct rq *rq) +@@ -42,6 +44,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg) + return LOAD_AVG_MAX - 1024 + avg->period_contrib; + } + ++#ifndef CONFIG_SCHED_ALT + /* + * When a task is 
dequeued, its estimated utilization should not be update if + * its util_avg has not been updated at least once. +@@ -162,9 +165,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) + return rq_clock_pelt(rq_of(cfs_rq)); + } + #endif ++#endif /* CONFIG_SCHED_ALT */ + + #else + ++#ifndef CONFIG_SCHED_ALT + static inline int + update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) + { +@@ -182,6 +187,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running) + { + return 0; + } ++#endif + + static inline int + update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index 10a1522b1e30..1a74a266340b 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -2,6 +2,10 @@ + /* + * Scheduler internal types and methods: + */ ++#ifdef CONFIG_SCHED_ALT ++#include "alt_sched.h" ++#else ++ + #include + + #include +@@ -2720,3 +2724,9 @@ static inline bool is_per_cpu_kthread(struct task_struct *p) + extern void sched_dynamic_update(int mode); + #endif + ++ ++static inline int task_running_nice(struct task_struct *p) ++{ ++ return (task_nice(p) > 0); ++} ++#endif /* !CONFIG_SCHED_ALT */ +diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c +index 750fb3c67eed..108422ebc7bf 100644 +--- a/kernel/sched/stats.c ++++ b/kernel/sched/stats.c +@@ -22,8 +22,10 @@ static int show_schedstat(struct seq_file *seq, void *v) + } else { + struct rq *rq; + #ifdef CONFIG_SMP ++#ifndef CONFIG_SCHED_ALT + struct sched_domain *sd; + int dcount = 0; ++#endif + #endif + cpu = (unsigned long)(v - 2); + rq = cpu_rq(cpu); +@@ -40,6 +42,7 @@ static int show_schedstat(struct seq_file *seq, void *v) + seq_printf(seq, "\n"); + + #ifdef CONFIG_SMP ++#ifndef CONFIG_SCHED_ALT + /* domain-specific stats */ + rcu_read_lock(); + for_each_domain(cpu, sd) { +@@ -68,6 +71,7 @@ static int show_schedstat(struct seq_file *seq, void *v) + sd->ttwu_move_balance); + } + rcu_read_unlock(); ++#endif + #endif + } + return 0; +diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c +index 09d35044bd88..2c146b042b51 100644 +--- a/kernel/sched/topology.c ++++ b/kernel/sched/topology.c +@@ -4,6 +4,7 @@ + */ + #include "sched.h" + ++#ifndef CONFIG_SCHED_ALT + DEFINE_MUTEX(sched_domains_mutex); + + /* Protected by sched_domains_mutex: */ +@@ -1241,8 +1242,10 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) + */ + + static int default_relax_domain_level = -1; ++#endif /* CONFIG_SCHED_ALT */ + int sched_domain_level_max; + ++#ifndef CONFIG_SCHED_ALT + static int __init setup_relax_domain_level(char *str) + { + if (kstrtoint(str, 0, &default_relax_domain_level)) +@@ -1472,6 +1475,7 @@ sd_init(struct sched_domain_topology_level *tl, + + return sd; + } ++#endif /* CONFIG_SCHED_ALT */ + + /* + * Topology list, bottom-up. 
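
Tying the earlier hunks together: the task_running_nice() helper added to kernel/sched/sched.h above is what lets the cputime.c changes shown earlier build against either scheduler. A side-by-side sketch of the three definitions, drawn only from hunks in this patch set:

    /* mainline (sched.h):  task_running_nice(p)  ==  task_nice(p) > 0                  */
    /* BMQ (bmq_imp.h):     p->prio + p->boost_prio  >  DEFAULT_PRIO + MAX_PRIORITY_ADJ */
    /* PDS (pds_imp.h):     task_sched_prio(p, task_rq(p))  >  DEFAULT_SCHED_PRIO       */

Whichever definition is active, account_user_time() and account_guest_time() pick CPUTIME_NICE vs CPUTIME_USER through the same call.
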
+@@ -1501,6 +1505,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl) + sched_domain_topology = tl; + } + ++#ifndef CONFIG_SCHED_ALT + #ifdef CONFIG_NUMA + + static const struct cpumask *sd_numa_mask(int cpu) +@@ -2371,3 +2376,17 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], + partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); + mutex_unlock(&sched_domains_mutex); + } ++#else /* CONFIG_SCHED_ALT */ ++void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], ++ struct sched_domain_attr *dattr_new) ++{} ++ ++#ifdef CONFIG_NUMA ++int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; ++ ++int sched_numa_find_closest(const struct cpumask *cpus, int cpu) ++{ ++ return best_mask_cpu(cpu, cpus); ++} ++#endif /* CONFIG_NUMA */ ++#endif +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 62fbd09b5dc1..19f9a6185db3 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -120,6 +120,10 @@ static unsigned long long_max = LONG_MAX; + static int one_hundred = 100; + static int two_hundred = 200; + static int one_thousand = 1000; ++#ifdef CONFIG_SCHED_ALT ++static int __maybe_unused zero = 0; ++extern int sched_yield_type; ++#endif + #ifdef CONFIG_PRINTK + static int ten_thousand = 10000; + #endif +@@ -1652,6 +1656,24 @@ int proc_do_static_key(struct ctl_table *table, int write, + } + + static struct ctl_table kern_table[] = { ++#ifdef CONFIG_SCHED_ALT ++/* In ALT, only supported "sched_schedstats" */ ++#ifdef CONFIG_SCHED_DEBUG ++#ifdef CONFIG_SMP ++#ifdef CONFIG_SCHEDSTATS ++ { ++ .procname = "sched_schedstats", ++ .data = NULL, ++ .maxlen = sizeof(unsigned int), ++ .mode = 0644, ++ .proc_handler = sysctl_schedstats, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_ONE, ++ }, ++#endif /* CONFIG_SCHEDSTATS */ ++#endif /* CONFIG_SMP */ ++#endif /* CONFIG_SCHED_DEBUG */ ++#else /* !CONFIG_SCHED_ALT */ + { + .procname = "sched_child_runs_first", + .data = &sysctl_sched_child_runs_first, +@@ -1854,6 +1876,7 @@ static struct ctl_table kern_table[] = { + .extra2 = SYSCTL_ONE, + }, + #endif ++#endif /* !CONFIG_SCHED_ALT */ + #ifdef CONFIG_PROVE_LOCKING + { + .procname = "prove_locking", +@@ -2430,6 +2453,17 @@ static struct ctl_table kern_table[] = { + .proc_handler = proc_dointvec, + }, + #endif ++#ifdef CONFIG_SCHED_ALT ++ { ++ .procname = "yield_type", ++ .data = &sched_yield_type, ++ .maxlen = sizeof (int), ++ .mode = 0644, ++ .proc_handler = &proc_dointvec_minmax, ++ .extra1 = &zero, ++ .extra2 = &two, ++ }, ++#endif + #if defined(CONFIG_S390) && defined(CONFIG_SMP) + { + .procname = "spin_retry", +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c +index 5c9d968187ae..fe47db46303c 100644 +--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -1940,8 +1940,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode, + int ret = 0; + u64 slack; + ++#ifndef CONFIG_SCHED_ALT + slack = current->timer_slack_ns; + if (dl_task(current) || rt_task(current)) ++#endif + slack = 0; + + hrtimer_init_sleeper_on_stack(&t, clockid, mode); +diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c +index 9abe15255bc4..691db8192ddb 100644 +--- a/kernel/time/posix-cpu-timers.c ++++ b/kernel/time/posix-cpu-timers.c +@@ -216,7 +216,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples) + u64 stime, utime; + + task_cputime(p, &utime, &stime); +- store_samples(samples, stime, utime, p->se.sum_exec_runtime); ++ store_samples(samples, stime, utime, tsk_seruntime(p)); + } + + static void 
proc_sample_cputime_atomic(struct task_cputime_atomic *at, +@@ -801,6 +801,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples, + } + } + ++#ifndef CONFIG_SCHED_ALT + static inline void check_dl_overrun(struct task_struct *tsk) + { + if (tsk->dl.dl_overrun) { +@@ -808,6 +809,7 @@ static inline void check_dl_overrun(struct task_struct *tsk) + __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); + } + } ++#endif + + static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard) + { +@@ -835,8 +837,10 @@ static void check_thread_timers(struct task_struct *tsk, + u64 samples[CPUCLOCK_MAX]; + unsigned long soft; + ++#ifndef CONFIG_SCHED_ALT + if (dl_task(tsk)) + check_dl_overrun(tsk); ++#endif + + if (expiry_cache_is_inactive(pct)) + return; +@@ -850,7 +854,7 @@ static void check_thread_timers(struct task_struct *tsk, + soft = task_rlimit(tsk, RLIMIT_RTTIME); + if (soft != RLIM_INFINITY) { + /* Task RT timeout is accounted in jiffies. RTTIME is usec */ +- unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ); ++ unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ); + unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME); + + /* At the hard limit, send SIGKILL. No further action. */ +@@ -1086,8 +1090,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk) + return true; + } + ++#ifndef CONFIG_SCHED_ALT + if (dl_task(tsk) && tsk->dl.dl_overrun) + return true; ++#endif + + return false; + } +diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c +index 73ef12092250..24bf8ef1249a 100644 +--- a/kernel/trace/trace_selftest.c ++++ b/kernel/trace/trace_selftest.c +@@ -1052,10 +1052,15 @@ static int trace_wakeup_test_thread(void *data) + { + /* Make this a -deadline thread */ + static const struct sched_attr attr = { ++#ifdef CONFIG_SCHED_ALT ++ /* No deadline on BMQ/PDS, use RR */ ++ .sched_policy = SCHED_RR, ++#else + .sched_policy = SCHED_DEADLINE, + .sched_runtime = 100000ULL, + .sched_deadline = 10000000ULL, + .sched_period = 10000000ULL ++#endif + }; + struct wakeup_test_data *x = data; + diff --git a/linux-tkg-patches/5.13/0012-misc-additions.patch b/linux-tkg-patches/5.13/0012-misc-additions.patch new file mode 100644 index 0000000..2e058a6 --- /dev/null +++ b/linux-tkg-patches/5.13/0012-misc-additions.patch @@ -0,0 +1,133 @@ +From e5e77ad2223f662e1615266d8ef39a8db7e65a70 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Felix=20H=C3=A4dicke?= +Date: Thu, 19 Nov 2020 09:22:32 +0100 +Subject: HID: quirks: Add Apple Magic Trackpad 2 to hid_have_special_driver + list +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +The Apple Magic Trackpad 2 is handled by the magicmouse driver. And +there were severe stability issues when both drivers (hid-generic and +hid-magicmouse) were loaded for this device. 
+ +Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=210241 + +Signed-off-by: Felix Hädicke +--- + drivers/hid/hid-quirks.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c +index bf7ecab5d9e5..142e9dae2837 100644 +--- a/drivers/hid/hid-quirks.c ++++ b/drivers/hid/hid-quirks.c +@@ -478,6 +478,8 @@ static const struct hid_device_id hid_have_special_driver[] = { + #if IS_ENABLED(CONFIG_HID_MAGICMOUSE) + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) }, ++ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) }, + #endif + #if IS_ENABLED(CONFIG_HID_MAYFLASH) + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) }, +-- +cgit v1.2.3-1-gf6bb5 + +From e437ac931e89629f952ce9f3f9dfe45ac505cd0d Mon Sep 17 00:00:00 2001 +From: Joshua Ashton +Date: Tue, 5 Jan 2021 19:46:01 +0000 +Subject: [PATCH] drm/amdgpu: don't limit gtt size on apus + +Since commit 24562523688b ("Revert "drm/amd/amdgpu: set gtt size +according to system memory size only""), the GTT size was limited by +3GiB or VRAM size. + +This is problematic on APU systems with a small carveout +(notably, those that ship with dGPUs where this is unconfigurable), +where the carveout size can be as low as 128MiB. + +This makes it so the GTT size heuristic always uses 3/4ths of +the system memory size on APUs (limiting the size by 3GiB/VRAM size +only on devices with dedicated video memory). + +Fixes: 24562523688b ("Revert drm/amd/amdgpu: set gtt size according to +system memory size only") + +Signed-off-by: Joshua Ashton +--- + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 5 +++-- + drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 12 +++++++++--- + 2 files changed, 12 insertions(+), 5 deletions(-) + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +index 72efd579ec5e..a5a41e9272d6 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +@@ -192,8 +192,9 @@ module_param_named(gartsize, amdgpu_gart_size, uint, 0600); + + /** + * DOC: gttsize (int) +- * Restrict the size of GTT domain in MiB for testing. The default is -1 (It's VRAM size if 3GB < VRAM < 3/4 RAM, +- * otherwise 3/4 RAM size). ++ * Restrict the size of GTT domain in MiB for testing. The default is -1 (On APUs this is 3/4th ++ * of the system memory; on dGPUs this is 3GiB or VRAM sized, whichever is bigger, ++ * with an upper bound of 3/4th of system memory. 
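
A quick worked example of the heuristic described above (illustrative numbers, not from the patch), with AMDGPU_DEFAULT_GTT_SIZE_MB at 3 GiB:

    APU,  16 GiB RAM:                gtt_size = 16 GiB * 3/4                    = 12 GiB
    dGPU,  8 GiB VRAM, 32 GiB RAM:   gtt_size = min(max(3 GiB, 8 GiB), 24 GiB)  =  8 GiB

so small-carveout APUs are no longer pinned near 3 GiB, while dGPU behaviour is unchanged.
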
+ */ + MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)"); + module_param_named(gttsize, amdgpu_gtt_size, int, 0600); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +index 4d8f19ab1014..294f26f4f310 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +@@ -1865,9 +1865,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) + struct sysinfo si; + + si_meminfo(&si); +- gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), +- adev->gmc.mc_vram_size), +- ((uint64_t)si.totalram * si.mem_unit * 3/4)); ++ gtt_size = (uint64_t)si.totalram * si.mem_unit * 3/4; ++ /* If we have dedicated memory, limit our GTT size to ++ * 3GiB or VRAM size, whichever is bigger ++ */ ++ if (!(adev->flags & AMD_IS_APU)) { ++ gtt_size = min(max(AMDGPU_DEFAULT_GTT_SIZE_MB << 20, ++ adev->gmc.mc_vram_size), ++ gtt_size); ++ } + } + else + gtt_size = (uint64_t)amdgpu_gtt_size << 20; +-- +2.30.0 + +From f7f49141a5dbe9c99d78196b58c44307fb2e6be3 Mon Sep 17 00:00:00 2001 +From: Tk-Glitch +Date: Wed, 3 Feb 2021 11:20:12 +0200 +Subject: Revert "cpufreq: Avoid configuring old governors as default with intel_pstate" + +This is an undesirable behavior for us since our aggressive ondemand performs +better than schedutil for gaming when using intel_pstate in passive mode. +Also it interferes with the option to select the desired default governor we have. + +diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig +index 2c7171e0b0010..85de313ddec29 100644 +--- a/drivers/cpufreq/Kconfig ++++ b/drivers/cpufreq/Kconfig +@@ -71,7 +71,6 @@ config CPU_FREQ_DEFAULT_GOV_USERSPACE + + config CPU_FREQ_DEFAULT_GOV_ONDEMAND + bool "ondemand" +- depends on !(X86_INTEL_PSTATE && SMP) + select CPU_FREQ_GOV_ONDEMAND + select CPU_FREQ_GOV_PERFORMANCE + help +@@ -83,7 +84,6 @@ config CPU_FREQ_DEFAULT_GOV_ONDEMAND + + config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE + bool "conservative" +- depends on !(X86_INTEL_PSTATE && SMP) + select CPU_FREQ_GOV_CONSERVATIVE + select CPU_FREQ_GOV_PERFORMANCE + help